id,original,modified 12090,"def test_lookup_table_with_empty_values(): assert lookup_table((), key=lambda x: x) == {} ","def test_lookup_table_with_empty_values(): assert lookup_table((), operator.itemgetter(0)) == {} " 29094,"def get_message_count(thread_id): """"""Fetches the messages count of the given thread. Args: thread_id: str. Returns: int. The count of messages of the given thread. """""" return feedback_models.GeneralFeedbackMessageModel.get_message_count( thread_id) ","def get_message_count(thread_id): """"""Fetches the messages count of the given thread. Args: thread_id: str. The ID of the thread to count messages in. Returns: int. The count of messages of the given thread. """""" return feedback_models.GeneralFeedbackMessageModel.get_message_count( thread_id) " 37797,"def test_unknown_platform_on_ci(monkeypatch, capsys): monkeypatch.setattr(os, 'environ', {""CI"": ""true""}) apply_mock_protection(monkeypatch) monkeypatch.setattr(sys, ""platform"", ""Something"") with pytest.raises(SystemExit) as exit: main() _, err = capsys.readouterr() assert exit.value.code == 2 assert 'cibuildwheel: Unable to detect platform from ""sys.platform""' in err ","def test_unknown_platform_on_ci(monkeypatch, capsys): monkeypatch.setattr(os, 'environ', {""CI"": ""true""}) apply_mock_protection(monkeypatch) monkeypatch.setattr(sys, ""platform"", ""an-unknown-platform"") with pytest.raises(SystemExit) as exit: main() _, err = capsys.readouterr() assert exit.value.code == 2 assert 'cibuildwheel: Unable to detect platform from ""sys.platform""' in err " 10116,"def deprecation_warning(module): deprecated_aliases = ['private_token'] for aliase in deprecated_aliases: if aliase in module.params: module.deprecate(""Aliases \'{aliase}\' is deprecated"".format(aliase=aliase), ""2.10"") ","def deprecation_warning(module): deprecated_aliases = ['private_token'] for aliase in deprecated_aliases: if aliase in module.params: module.deprecate(""Alias \'{aliase}\' is deprecated"".format(aliase=aliase), ""2.10"") " 57544,"def asyncio_setup(reload: bool = False) -> None: # pragma: no cover if sys.version_info >= (3, 8) and sys.platform == ""win32"": if reload: logger.warning( ""The --reload flag should not be \ used in production on Windows."" ) asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) ","def asyncio_setup(reload: bool = False) -> None: # pragma: no cover if sys.version_info >= (3, 8) and sys.platform == ""win32"": if reload: logger.warning( ""The --reload flag should not be "" ""used in production on Windows."" ) asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) " 50026,"def test_retrieve_watched_dirs(): dirs = amtypes.retrieve_watched_dirs() assert len(list(dirs)) == len(EXPECTED_DIRS) for dir_ in dirs: if dir_ not in EXPECTED_DIRS: assert ( False ), ""Unexpected watched directory in transfer watched directories: {}"".format( dir_ ) ","def test_retrieve_watched_dirs(): dirs = amtypes.retrieve_watched_dirs() assert len(list(dirs)) == len(EXPECTED_DIRS) for dir_ in dirs: assert dir_ in EXPECTED_DIRS, ""Unexpected watched directory in transfer watched directories: {}"".format( dir_ ) " 37643,"def _text_circuit_drawer( circuit, filename=None, reverse_bits=False, plot_barriers=True, justify=None, vertical_compression=""high"", idle_wires=True, with_layout=True, fold=None, initial_state=True, cregbundle=False, encoding=None, wire_order=None, ): """"""Draws a circuit using ascii art. 
Args: circuit (QuantumCircuit): Input circuit filename (str): Optional filename to write the result reverse_bits (bool): Rearrange the bits in reverse order. plot_barriers (bool): Draws the barriers when they are there. justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how the circuit should be justified. vertical_compression (string): `high`, `medium`, or `low`. It merges the lines so the drawing will take less vertical room. Default is `high`. idle_wires (bool): Include idle wires. Default is True. with_layout (bool): Include layout information with labels on the physical layout. Default: True fold (int): Optional. Breaks the circuit drawing to this length. This is useful when the drawing does not fit in the console. If None (default), it will try to guess the console width using `shutil.get_terminal_size()`. If you don't want pagination at all, set `fold=-1`. initial_state (bool): Optional. Adds |0> in the beginning of the line. Default: `False`. cregbundle (bool): Optional. If set True, bundle classical registers. Default: ``True``. encoding (str): Optional. Sets the encoding preference of the output. Default: ``sys.stdout.encoding``. wire_order (list): optional. A list of integers used to reorder the display of the bits. The list must have an entry for every bit with the bits in the range 0 to (num_qubits + num_clbits). Returns: TextDrawing: An instance that, when printed, draws the circuit in ascii art. Raises: VisualizationError: When the filename extenstion is not .txt. """""" qubits, clbits, nodes = utils._get_layered_instructions( circuit, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, wire_order=wire_order, ) text_drawing = _text.TextDrawing( qubits, clbits, nodes, reverse_bits=reverse_bits, layout=None, initial_state=initial_state, cregbundle=cregbundle, global_phase=None, encoding=encoding, qregs=None, cregs=None, with_layout=with_layout, circuit=circuit, ) text_drawing.plotbarriers = plot_barriers text_drawing.line_length = fold text_drawing.vertical_compression = vertical_compression if filename: text_drawing.dump(filename, encoding=encoding) return text_drawing ","def _text_circuit_drawer( circuit, filename=None, reverse_bits=False, plot_barriers=True, justify=None, vertical_compression=""high"", idle_wires=True, with_layout=True, fold=None, initial_state=True, cregbundle=False, encoding=None, wire_order=None, ): """"""Draws a circuit using ascii art. Args: circuit (QuantumCircuit): Input circuit filename (str): Optional filename to write the result reverse_bits (bool): Rearrange the bits in reverse order. plot_barriers (bool): Draws the barriers when they are there. justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how the circuit should be justified. vertical_compression (string): `high`, `medium`, or `low`. It merges the lines so the drawing will take less vertical room. Default is `high`. idle_wires (bool): Include idle wires. Default is True. with_layout (bool): Include layout information with labels on the physical layout. Default: True fold (int): Optional. Breaks the circuit drawing to this length. This is useful when the drawing does not fit in the console. If None (default), it will try to guess the console width using `shutil.get_terminal_size()`. If you don't want pagination at all, set `fold=-1`. initial_state (bool): Optional. Adds |0> in the beginning of the line. Default: `False`. cregbundle (bool): Optional. If set True, bundle classical registers. Default: ``True``. encoding (str): Optional. 
Sets the encoding preference of the output. Default: ``sys.stdout.encoding``. wire_order (list): Optional. A list of integers used to reorder the display of the bits. The list must have an entry for every bit with the bits in the range 0 to (num_qubits + num_clbits). Returns: TextDrawing: An instance that, when printed, draws the circuit in ascii art. Raises: VisualizationError: When the filename extenstion is not .txt. """""" qubits, clbits, nodes = utils._get_layered_instructions( circuit, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, wire_order=wire_order, ) text_drawing = _text.TextDrawing( qubits, clbits, nodes, reverse_bits=reverse_bits, layout=None, initial_state=initial_state, cregbundle=cregbundle, global_phase=None, encoding=encoding, qregs=None, cregs=None, with_layout=with_layout, circuit=circuit, ) text_drawing.plotbarriers = plot_barriers text_drawing.line_length = fold text_drawing.vertical_compression = vertical_compression if filename: text_drawing.dump(filename, encoding=encoding) return text_drawing " 32230,"def rasterize_html_command(): entry_id = demisto.args().get('EntryID') w = demisto.args().get('width', DEFAULT_W).rstrip('px') h = demisto.args().get('height', DEFAULT_H).rstrip('px') r_type = demisto.args().get('type', 'png') file_name = demisto.args().get('file_name', 'email') file_name = f'{file_name}.{""pdf"" if r_type.lower() == ""pdf"" else ""png""}' # type: ignore file_path = demisto.getFilePath(entry_id).get('path') with open(file_path, 'rb') as f: output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type=r_type) res = fileResult(filename=file_name, data=output) if r_type == 'png': res['Type'] = entryTypes['image'] demisto.results(res) ","def rasterize_html_command(): entry_id = demisto.args().get('EntryID') w = demisto.args().get('width', DEFAULT_W).rstrip('px') h = demisto.args().get('height', DEFAULT_H).rstrip('px') r_type = demisto.args().get('type', 'png') file_name = demisto.args().get('file_name', 'email') file_name = f'{file_name}.{""pdf"" if r_type.lower() == ""pdf"" else ""png""}' # type: ignore file_path = demisto.getFilePath(entry_id).get('path') with open(file_path, 'rb') as f: output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type=r_type) res = fileResult(filename=file_name, data=output) if r_type == 'png': res['Type'] = entryTypes['image'] return_results(res) " 45691,"def ManhattanPlot( dataframe, chrm=""CHR"", bp=""BP"", p=""P"", snp=""SNP"", gene=""GENE"", annotation=None, logp=True, title=""Manhattan Plot"", showgrid=True, xlabel=None, ylabel='-log10(p)', point_size=5, showlegend=True, col=None, suggestiveline_value=-np.log10(1e-8), suggestiveline_color='#636efa', suggestiveline_width=1, genomewideline_value=-np.log10(5e-8), genomewideline_color='#EF553B', genomewideline_width=1, highlight=True, highlight_color=""red"", ): """"""Returns a figure for a manhattan plot. Keyword arguments: - dataframe (dataframe; required): A pandas dataframe which must contain at least the following three columns: - the chromosome number - genomic base-pair position - a numeric quantity to plot such as a p-value or zscore - chrm (string; default 'CHR'): A string denoting the column name for the chromosome. This column must be float or integer. Minimum number of chromosomes required is 1. If you have X, Y, or MT chromosomes, be sure to renumber these 23, 24, 25, etc. - bp (string; default 'BP'): A string denoting the column name for the chromosomal position. 
- p (string; default 'P'): A string denoting the column name for the float quantity to be plotted on the y-axis. This column must be numeric. This does not have to be a p-value. It can be any numeric quantity such as peak heights, bayes factors, test statistics. If it is not a p-value, make sure to set logp = FALSE. - snp (string; default 'SNP'): A string denoting the column name for the SNP names (e.g. rs number). More generally, this column could be anything that identifies each point being plotted. For example, in an Epigenomewide association study (EWAS) this could be the probe name or cg number. This column should be a character. This argument is optional, however it is necessary to specify if you want to highlight points on the plot using the highlight argument in the figure method. - gene (string; default 'GENE'): A string denoting the column name for the GENE names. This column could be a string or a float. More generally this could be any annotation information that you want to include in the plot. - annotation (string; optional): A string denoting the column name for an annotation. This column could be a string or a float. This could be any annotation information that you want to include in the plot (e.g. zscore, effect size, minor allele frequency). - logp (bool; optional): If True, the -log10 of the p-value is plotted. It isn't very useful to plot raw p-values; however, plotting the raw value could be useful for other genome-wide plots (e.g., peak heights, bayes factors, test statistics, other ""scores"", etc.) - title (string; default 'Manhattan Plot') The title of the graph. - showgrid (bool; default true): Boolean indicating whether gridlines should be shown. - xlabel (string; optional): Label of the x axis. - ylabel: (string; default '-log10(p)'): Label of the y axis. - point_size (number; default 5): Size of the points of the Scatter plot. - showlegend (bool; default true): Boolean indicating whether legends should be shown. - col (string; optional): A string representing the color of the points of the Scatter plot. Can be in any color format accepted by plotly_js graph_objs. - suggestiveline_value (bool | float; default 8): A value which must be False to deactivate the option, or a numerical value corresponding to the p-value at which the line should be drawn. The line has no influence on the data points. - suggestiveline_color (string; default 'grey'): Color of the suggestive line. - suggestiveline_width (number; default 2): Width of the suggestive line. - genomewideline_value (bool | float; default -log10(5e-8)): A boolean which must be False to deactivate the option, or a numerical value corresponding to the p-value above which the data points are considered significant. - genomewideline_color (string; default 'red'): Color of the genome wide line. Can be in any color format accepted by plotly_js graph_objs. - genomewideline_width (number; default 1): Width of the genome wide line. - highlight (bool; default true): turning on/off the highlighting of data points considered significant. - highlight_color (string; default 'red'): Color of the data points highlighted because they are significant Can be in any color format accepted by plotly_js graph_objs. # ... 
Example 1: Random Manhattan Plot ''' dataframe = pd.DataFrame( np.random.randint(0,100,size=(100, 3)), columns=['P', 'CHR', 'BP']) fig = create_manhattan(dataframe, title='XYZ Manhattan plot') plotly.offline.plot(fig, image='png') ''' """""" mh = _ManhattanPlot( dataframe, chrm=chrm, bp=bp, p=p, snp=snp, gene=gene, annotation=annotation, logp=logp ) return mh.figure( title=title, showgrid=showgrid, xlabel=xlabel, ylabel=ylabel, point_size=point_size, showlegend=showlegend, col=col, suggestiveline_value=suggestiveline_value, suggestiveline_color=suggestiveline_color, suggestiveline_width=suggestiveline_width, genomewideline_value=genomewideline_value, genomewideline_color=genomewideline_color, genomewideline_width=genomewideline_width, highlight=highlight, highlight_color=highlight_color ) ","def ManhattanPlot( dataframe, chrm=""CHR"", bp=""BP"", p=""P"", snp=""SNP"", gene=""GENE"", annotation=None, logp=True, title=""Manhattan Plot"", showgrid=True, xlabel=None, ylabel='-log10(p)', point_size=5, showlegend=True, col=None, suggestiveline_value=-np.log10(1e-8), suggestiveline_color='#636efa', suggestiveline_width=1, genomewideline_value=-np.log10(5e-8), genomewideline_color='#EF553B', genomewideline_width=1, highlight=True, highlight_color=""red"", ): """"""Returns a figure for a manhattan plot. Keyword arguments: - dataframe (dataframe; required): A pandas dataframe which must contain at least the following three columns: - the chromosome number - genomic base-pair position - a numeric quantity to plot such as a p-value or zscore - chrm (string; default 'CHR'): A string denoting the column name for the chromosome. This column must be float or integer. Minimum number of chromosomes required is 1. If you have X, Y, or MT chromosomes, be sure to renumber these 23, 24, 25, etc. - bp (string; default 'BP'): A string denoting the column name for the chromosomal position. - p (string; default 'P'): A string denoting the column name for the float quantity to be plotted on the y-axis. This column must be numeric. This does not have to be a p-value. It can be any numeric quantity such as peak heights, bayes factors, test statistics. If it is not a p-value, make sure to set logp = FALSE. - snp (string; default 'SNP'): A string denoting the column name for the SNP names (e.g., rs number). More generally, this column could be anything that identifies each point being plotted. For example, in an Epigenomewide association study (EWAS) this could be the probe name or cg number. This column should be a character. This argument is optional, however it is necessary to specify if you want to highlight points on the plot using the highlight argument in the figure method. - gene (string; default 'GENE'): A string denoting the column name for the GENE names. This column could be a string or a float. More generally this could be any annotation information that you want to include in the plot. - annotation (string; optional): A string denoting the column name for an annotation. This column could be a string or a float. This could be any annotation information that you want to include in the plot (e.g. zscore, effect size, minor allele frequency). - logp (bool; optional): If True, the -log10 of the p-value is plotted. It isn't very useful to plot raw p-values; however, plotting the raw value could be useful for other genome-wide plots (e.g., peak heights, bayes factors, test statistics, other ""scores"", etc.) - title (string; default 'Manhattan Plot') The title of the graph. 
- showgrid (bool; default true): Boolean indicating whether gridlines should be shown. - xlabel (string; optional): Label of the x axis. - ylabel: (string; default '-log10(p)'): Label of the y axis. - point_size (number; default 5): Size of the points of the Scatter plot. - showlegend (bool; default true): Boolean indicating whether legends should be shown. - col (string; optional): A string representing the color of the points of the Scatter plot. Can be in any color format accepted by plotly_js graph_objs. - suggestiveline_value (bool | float; default 8): A value which must be False to deactivate the option, or a numerical value corresponding to the p-value at which the line should be drawn. The line has no influence on the data points. - suggestiveline_color (string; default 'grey'): Color of the suggestive line. - suggestiveline_width (number; default 2): Width of the suggestive line. - genomewideline_value (bool | float; default -log10(5e-8)): A boolean which must be False to deactivate the option, or a numerical value corresponding to the p-value above which the data points are considered significant. - genomewideline_color (string; default 'red'): Color of the genome wide line. Can be in any color format accepted by plotly_js graph_objs. - genomewideline_width (number; default 1): Width of the genome wide line. - highlight (bool; default true): turning on/off the highlighting of data points considered significant. - highlight_color (string; default 'red'): Color of the data points highlighted because they are significant Can be in any color format accepted by plotly_js graph_objs. # ... Example 1: Random Manhattan Plot ''' dataframe = pd.DataFrame( np.random.randint(0,100,size=(100, 3)), columns=['P', 'CHR', 'BP']) fig = create_manhattan(dataframe, title='XYZ Manhattan plot') plotly.offline.plot(fig, image='png') ''' """""" mh = _ManhattanPlot( dataframe, chrm=chrm, bp=bp, p=p, snp=snp, gene=gene, annotation=annotation, logp=logp ) return mh.figure( title=title, showgrid=showgrid, xlabel=xlabel, ylabel=ylabel, point_size=point_size, showlegend=showlegend, col=col, suggestiveline_value=suggestiveline_value, suggestiveline_color=suggestiveline_color, suggestiveline_width=suggestiveline_width, genomewideline_value=genomewideline_value, genomewideline_color=genomewideline_color, genomewideline_width=genomewideline_width, highlight=highlight, highlight_color=highlight_color ) " 28143,"def abstractmethod(funcobj): """"""A decorator indicating abstract methods. This is heavily inspired by the decorator of the same name in the ABC standard library. But we make our own version because we actually want to allow the class with the abstract method to be instantiated and we will use this property to detect if the method is abstract and should be overwritten. """""" funcobj.__qcodesisabstractmethod__ = True return funcobj ","def abstractmethod(funcobj): """"""A decorator indicating abstract methods. This is heavily inspired by the decorator of the same name in the ABC standard library. But we make our own version because we actually want to allow the class with the abstract method to be instantiated and we will use this property to detect if the method is abstract and should be overwritten. """""" funcobj.__qcodes_is_abstract_method__ = True return funcobj " 14414,"def test_orbit_elevation(): # From example 5.9 Howard Curtis fourth edition. r = np.array([-2032.4, 4591.2, -4544.8]) << u.km v = ( np.array([100, 50, 100]) << u.km / u.s ) # Not relevant for elevation calculation. 
epoch = Time(""2022-01-01"") # Not relevant. body = Earth orbit = Orbit.from_vectors(body, r, v, epoch) lat = -40 << u.deg theta = 110 << u.deg # Local sidereal time. h = 0 * u.m elevation = orbit.elevation(lat, theta, h) expected_elevation = 41.41 << u.deg assert_quantity_allclose(elevation, expected_elevation, atol=2e-4 * u.rad) ","def test_orbit_elevation(): # From example 5.9 Howard Curtis fourth edition. r = [-2032.4, 4591.2, -4544.8] << u.km v = ( np.array([100, 50, 100]) << u.km / u.s ) # Not relevant for elevation calculation. epoch = Time(""2022-01-01"") # Not relevant. body = Earth orbit = Orbit.from_vectors(body, r, v, epoch) lat = -40 << u.deg theta = 110 << u.deg # Local sidereal time. h = 0 * u.m elevation = orbit.elevation(lat, theta, h) expected_elevation = 41.41 << u.deg assert_quantity_allclose(elevation, expected_elevation, atol=2e-4 * u.rad) " 17745,"def from_gmso(topology, compound=None, coords_only=False, infer_hierarchy=True): """"""Convert a GMSO Topology to mBuild Compound. Parameter --------- topology : gmso.Topology The GMSO Topology to be converted. compound : mb.Compound, optional, default=None Host mb.Compound that we are loading to. coords_only : bool, optional, default=False Set preexisting atoms in compound to coordinates given by Topology. infer_hierarchy : bool, optional, default=True If True, infer compound hierarchy from Topology residue, to be implemented. Returns ------- compound : mb.Compound """""" import unyt as u if compound and coords_only: if topology.n_sites != compound.n_particles: raise ValueError( f""Number of sites in {topology} does not match {compound}"" f""Topology: {topology.n_sites} sites"" f""Compound: {compound.n_particles} particles"" ) atoms_particles = zip( topology.sites, compound.particles(include_ports=False) ) if None in compound._particles(include_ports=None): raise ValueError(""Some particles are None"") for site, particle in atoms_particles: particle.pos = np.array(site.position.to(u.nm).value) return compound elif not compound and coords_only: raise MBuildError(""coords_only=True but host compound is not provided"") # Initialize a compound if none is provided if not compound: compound = mb.Compound() # Convert gmso Topology to mbuild Compound from gmso.external.convert_mbuild import to_mbuild if not compound: return to_mbuild(topology) else: compound.add(to_mbuild(topology)) return compound ","def from_gmso(topology, compound=None, coords_only=False, infer_hierarchy=True): """"""Convert a GMSO Topology to mBuild Compound. Parameter --------- topology : gmso.Topology The GMSO Topology to be converted. compound : mb.Compound, optional, default=None Host mb.Compound that we are loading to. coords_only : bool, optional, default=False Set preexisting atoms in compound to coordinates given by Topology. infer_hierarchy : bool, optional, default=True If True, infer compound hierarchy from Topology residue, to be implemented. 
Returns ------- compound : mb.Compound """""" import unyt as u if compound and coords_only: if topology.n_sites != compound.n_particles: raise ValueError( f""Number of sites in {topology} does not match {compound}"" f""Topology: {topology.n_sites} sites"" f""Compound: {compound.n_particles} particles"" ) atoms_particles = zip( topology.sites, compound.particles(include_ports=False) ) if None in compound._particles(include_ports=False): raise ValueError(""Some particles are None"") for site, particle in atoms_particles: particle.pos = np.array(site.position.to(u.nm).value) return compound elif not compound and coords_only: raise MBuildError(""coords_only=True but host compound is not provided"") # Initialize a compound if none is provided if not compound: compound = mb.Compound() # Convert gmso Topology to mbuild Compound from gmso.external.convert_mbuild import to_mbuild if not compound: return to_mbuild(topology) else: compound.add(to_mbuild(topology)) return compound " 11449,"def main(generate_input, generate_output): with open(generate_input, ""r"") as reader: data = json.load(reader) sdk_folder = ""."" result = {""packages"": []} for package in data.values(): package_name = package[""packageName""] # Changelog last_version = [""first release""] if 'azure-mgmt-' in package_name: md_output = change_log_generate(package_name, last_version) else: md_output = ""data-plan skip changelog generation temporarily"" package[""changelog""] = { ""content"": md_output, ""hasBreakingChange"": ""Breaking changes"" in md_output, ""breakingChangeItems"": extract_breaking_change(md_output), } _LOGGER.info(f""[PACKAGE]({package_name})[CHANGELOG]:{md_output}"") # Built package create_package(package_name) folder_name = package[""path""][0] dist_path = Path(sdk_folder, folder_name, package_name, ""dist"") package[""artifacts""] = [str(dist_path / package_file) for package_file in os.listdir(dist_path)] package[""result""] = ""succeeded"" # to distinguish with track1 package[""packageName""] = ""track2_"" + package[""packageName""] package[""packageFolder""] = package[""path""][0] result[""packages""].append(package) with open(generate_output, ""w"") as writer: json.dump(result, writer) ","def main(generate_input, generate_output): with open(generate_input, ""r"") as reader: data = json.load(reader) sdk_folder = ""."" result = {""packages"": []} for package in data.values(): package_name = package[""packageName""] # Changelog last_version = [""first release""] if 'azure-mgmt-' in package_name: md_output = change_log_generate(package_name, last_version) else: md_output = ""data-plan skip changelog generation temporarily"" package[""changelog""] = { ""content"": md_output, ""hasBreakingChange"": ""Breaking changes"" in md_output, ""breakingChangeItems"": extract_breaking_change(md_output), } package[""version""] = last_version[-1] _LOGGER.info(f""[PACKAGE]({package_name})[CHANGELOG]:{md_output}"") # Built package create_package(package_name) folder_name = package[""path""][0] dist_path = Path(sdk_folder, folder_name, package_name, ""dist"") package[""artifacts""] = [str(dist_path / package_file) for package_file in os.listdir(dist_path)] package[""result""] = ""succeeded"" # to distinguish with track1 package[""packageName""] = ""track2_"" + package[""packageName""] package[""packageFolder""] = package[""path""][0] result[""packages""].append(package) with open(generate_output, ""w"") as writer: json.dump(result, writer) " 29001,"def run_monkey_island(): island_args = parse_cli_args() config_options, 
server_config_path = _setup_data_dir(island_args) _exit_on_invalid_config_options(config_options) _configure_logging(config_options) _initialize_globals(config_options, server_config_path) mongo_db_process = None if config_options.start_mongodb: mongo_db_process = start_mongodb(config_options.data_dir) register_mongo_shutdown_callback(mongo_db_process) try: connect_to_mongodb(mongo_db_process) except MongoDbProcessException: logger.error( f""MongoDB could not start. For details, check the MongoDB log at "" f""{mongo_db_process.get_log_file()}"" ) sys.exit(-1) _configure_gevent_exception_handling(Path(config_options.data_dir)) _start_island_server(island_args.setup_only, config_options) ","def run_monkey_island(): island_args = parse_cli_args() config_options, server_config_path = _setup_data_dir(island_args) _exit_on_invalid_config_options(config_options) _configure_logging(config_options) _initialize_globals(config_options, server_config_path) mongo_db_process = None if config_options.start_mongodb: mongo_db_process = start_mongodb(config_options.data_dir) register_mongo_shutdown_callback(mongo_db_process) try: connect_to_mongodb(mongo_db_process) except MongoDbProcessException: logger.error( f""MongoDB could not start. For details, check the MongoDB log at "" f""{mongo_db_process.get_log_file()}"" ) sys.exit(1) _configure_gevent_exception_handling(Path(config_options.data_dir)) _start_island_server(island_args.setup_only, config_options) " 2538,"def test_pairwise_distances_argmin_min(): # Check pairwise minimum distances computation for any metric X = [[0], [1]] Y = [[-2], [3]] Xsp = dok_matrix(X) Ysp = csr_matrix(Y, dtype=np.float32) expected_idx = [0, 1] expected_vals = [2, 2] expected_vals_sq = [4, 4] # euclidean metric idx, vals = pairwise_distances_argmin_min(X, Y, metric=""euclidean"") idx2 = pairwise_distances_argmin(X, Y, metric=""euclidean"") assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(idx2, expected_idx) assert_array_almost_equal(vals, expected_vals) # sparse matrix case idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric=""euclidean"") assert_array_almost_equal(idxsp, expected_idx) assert_array_almost_equal(valssp, expected_vals) # We don't want np.matrix here assert type(idxsp) == np.ndarray assert type(valssp) == np.ndarray # euclidean metric squared idx, vals = pairwise_distances_argmin_min( X, Y, metric=""euclidean"", metric_kwargs={""squared"": True} ) assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(vals, expected_vals_sq) # Non-euclidean scikit-learn metric idx, vals = pairwise_distances_argmin_min(X, Y, metric=""manhattan"") idx2 = pairwise_distances_argmin(X, Y, metric=""manhattan"") assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(idx2, expected_idx) assert_array_almost_equal(vals, expected_vals) # sparse matrix case idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric=""manhattan"") assert_array_almost_equal(idxsp, expected_idx) assert_array_almost_equal(valssp, expected_vals) # Non-euclidean Scipy distance (callable) idx, vals = pairwise_distances_argmin_min( X, Y, metric=minkowski, metric_kwargs={""p"": 2} ) assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(vals, expected_vals) # Non-euclidean Scipy distance (string) idx, vals = pairwise_distances_argmin_min( X, Y, metric=""minkowski"", metric_kwargs={""p"": 2} ) assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(vals, expected_vals) # Compare with naive implementation rng = 
np.random.RandomState(0) X = rng.randn(97, 149) Y = rng.randn(111, 149) dist = pairwise_distances(X, Y, metric=""manhattan"") dist_orig_ind = dist.argmin(axis=0) dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))] dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min( X, Y, axis=0, metric=""manhattan"" ) np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7) np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7) # Changing the axe and permuting datasets must give the same results argmin_0, dist_0 = pairwise_distances_argmin_min(X, Y, axis=0) argmin_1, dist_1 = pairwise_distances_argmin_min(Y, X, axis=1) assert_array_equal(dist_0, dist_1) assert_array_equal(argmin_0, argmin_1) argmin_0, dist_0 = pairwise_distances_argmin_min(X, X, axis=0) argmin_1, dist_1 = pairwise_distances_argmin_min(X, X, axis=1) assert_array_equal(dist_0, dist_1) assert_array_equal(argmin_0, argmin_1) # Changing the axe and permuting datasets must give the same results argmin_0 = pairwise_distances_argmin(X, Y, axis=0) argmin_1 = pairwise_distances_argmin(Y, X, axis=1) assert_array_equal(argmin_0, argmin_1) argmin_0 = pairwise_distances_argmin_min(X, X, axis=0) argmin_1 = pairwise_distances_argmin_min(X, X, axis=1) assert_array_equal(argmin_0, argmin_1) ","def test_pairwise_distances_argmin_min(): # Check pairwise minimum distances computation for any metric X = [[0], [1]] Y = [[-2], [3]] Xsp = dok_matrix(X) Ysp = csr_matrix(Y, dtype=np.float32) expected_idx = [0, 1] expected_vals = [2, 2] expected_vals_sq = [4, 4] # euclidean metric idx, vals = pairwise_distances_argmin_min(X, Y, metric=""euclidean"") idx2 = pairwise_distances_argmin(X, Y, metric=""euclidean"") assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(idx2, expected_idx) assert_array_almost_equal(vals, expected_vals) # sparse matrix case idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric=""euclidean"") assert_array_almost_equal(idxsp, expected_idx) assert_array_almost_equal(valssp, expected_vals) # We don't want np.matrix here assert type(idxsp) == np.ndarray assert type(valssp) == np.ndarray # euclidean metric squared idx, vals = pairwise_distances_argmin_min( X, Y, metric=""euclidean"", metric_kwargs={""squared"": True} ) assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(vals, expected_vals_sq) # Non-euclidean scikit-learn metric idx, vals = pairwise_distances_argmin_min(X, Y, metric=""manhattan"") idx2 = pairwise_distances_argmin(X, Y, metric=""manhattan"") assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(idx2, expected_idx) assert_array_almost_equal(vals, expected_vals) # sparse matrix case idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric=""manhattan"") assert_array_almost_equal(idxsp, expected_idx) assert_array_almost_equal(valssp, expected_vals) # Non-euclidean Scipy distance (callable) idx, vals = pairwise_distances_argmin_min( X, Y, metric=minkowski, metric_kwargs={""p"": 2} ) assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(vals, expected_vals) # Non-euclidean Scipy distance (string) idx, vals = pairwise_distances_argmin_min( X, Y, metric=""minkowski"", metric_kwargs={""p"": 2} ) assert_array_almost_equal(idx, expected_idx) assert_array_almost_equal(vals, expected_vals) # Compare with naive implementation rng = np.random.RandomState(0) X = rng.randn(97, 149) Y = rng.randn(111, 149) dist = pairwise_distances(X, Y, metric=""manhattan"") dist_orig_ind = dist.argmin(axis=0) 
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))] dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min( X, Y, axis=0, metric=""manhattan"" ) np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7) np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7) # Changing the axis and permuting datasets must give the same results argmin_0, dist_0 = pairwise_distances_argmin_min(X, Y, axis=0) argmin_1, dist_1 = pairwise_distances_argmin_min(Y, X, axis=1) assert_array_equal(dist_0, dist_1) assert_array_equal(argmin_0, argmin_1) argmin_0, dist_0 = pairwise_distances_argmin_min(X, X, axis=0) argmin_1, dist_1 = pairwise_distances_argmin_min(X, X, axis=1) assert_array_equal(dist_0, dist_1) assert_array_equal(argmin_0, argmin_1) # Changing the axe and permuting datasets must give the same results argmin_0 = pairwise_distances_argmin(X, Y, axis=0) argmin_1 = pairwise_distances_argmin(Y, X, axis=1) assert_array_equal(argmin_0, argmin_1) argmin_0 = pairwise_distances_argmin_min(X, X, axis=0) argmin_1 = pairwise_distances_argmin_min(X, X, axis=1) assert_array_equal(argmin_0, argmin_1) " 57007,"def check_can_modify_voiceartist_in_activity(user, activity_rights): """"""Check whether the user can assign voicearttist to activity. Args: user: UserActionInfo. Object having user_id, role, and actions for given user. activity_rights: AcitivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can assign voiceartist. """""" if activity_rights is None: return False if (activity_rights.community_owned and role_services.ACTION_CAN_ASSIGN_VOICEARTIST in user.actions): return True elif (activity_rights.is_published() and role_services.ACTION_CAN_ASSIGN_VOICEARTIST in user.actions): return True else: return False ","def check_can_modify_voiceartist_in_activity(user, activity_rights): """"""Check whether the user can assign voicearttist to activity. Args: user: UserActionInfo. Object having user_id, role, and actions for given user. activity_rights: AcitivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can assign voiceartist. """""" if activity_rights is None: return False if (role_services.ACTION_CAN_ASSIGN_VOICEARTIST in user.actions and ( activity_rights.is_published() or activity_rights.community_owned)): return True else: return False " 25775,"def busmap_by_louvain(network): logger.warning( ""Function ``busmap_by_louvain`` is deprecated and will not work a future release. 
"" ""Use ``busmap_by_kmeans`` or ``busmap_by_spectral_clustering`` instead."" ) if find_spec('community') is None: raise ModuleNotFoundError(""Optional dependency 'community' not found."" ""Install via 'conda install -c conda-forge python-louvain' "" ""or 'pip install python-louvain'"") import community lines = (network.lines.loc[:, ['bus0', 'bus1']] .assign(weight=network.lines.s_max_pu*network.lines.s_nom/abs(network.lines.r+1j*network.lines.x)) .set_index(['bus0','bus1'])) lines = lines.append(network.links.loc[:, ['bus0', 'bus1']] .assign(weight=network.links.p_max_pu*network.links.p_nom).set_index(['bus0','bus1'])) G = nx.Graph() G.add_nodes_from(network.buses.index) G.add_edges_from((u,v,dict(weight=w)) for (u,v),w in lines.itertuples()) b = community.best_partition(G) list_cluster = [] for i in b: list_cluster.append(str(b[i])) return pd.Series(list_cluster, index=network.buses.index) ","def busmap_by_louvain(network): logger.warning( ""Function ``busmap_by_louvain`` is deprecated and will not work a future release. "" ""Use ``busmap_by_kmeans`` or ``busmap_by_spectral_clustering`` instead."" ) if find_spec('community') is None: raise ModuleNotFoundError(""Optional dependency 'community' not found."" ""Install via 'conda install -c conda-forge python-louvain' "" ""or 'pip install python-louvain'"") import community lines = (network.lines[['bus0', 'bus1']] .assign(weight=network.lines.s_max_pu*network.lines.s_nom/abs(network.lines.r+1j*network.lines.x)) .set_index(['bus0','bus1'])) lines = lines.append(network.links.loc[:, ['bus0', 'bus1']] .assign(weight=network.links.p_max_pu*network.links.p_nom).set_index(['bus0','bus1'])) G = nx.Graph() G.add_nodes_from(network.buses.index) G.add_edges_from((u,v,dict(weight=w)) for (u,v),w in lines.itertuples()) b = community.best_partition(G) list_cluster = [] for i in b: list_cluster.append(str(b[i])) return pd.Series(list_cluster, index=network.buses.index) " 37949,"def load_japan_quakes(): """""" Load a table of earthquakes around Japan as a pandas.DataFrame. Data is from the NOAA NGDC database. This is the ``@tut_quakes.ngdc`` dataset used in the GMT tutorials. The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the first time you invoke this function. Afterwards, it will load the data from the cache. So you'll need an internet connection the first time around. Returns ------- data : pandas.DataFrame The data table. Columns are year, month, day, latitude, longitude, depth (in km), and magnitude of the earthquakes. """""" fname = which(""@tut_quakes.ngdc"", download=""c"") data = pd.read_csv(fname, header=1, sep=r""\s+"") data.columns = [ ""year"", ""month"", ""day"", ""latitude"", ""longitude"", ""depth_km"", ""magnitude"", ] return data ","def load_japan_quakes(): """""" Load a table of earthquakes around Japan as a pandas.DataFrame. Data is from the NOAA NGDC database. This is the ``@tut_quakes.ngdc`` dataset used in the GMT tutorials. The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the first time you invoke this function. Afterwards, it will load the data from the cache. So you'll need an internet connection the first time around. Returns ------- data : pandas.DataFrame The data table. Columns are year, month, day, latitude, longitude, depth (in km), and magnitude of the earthquakes. 
"""""" fname = which(""@tut_quakes.ngdc"", download=""c"") data = pd.read_csv(fname, header=1, sep=r""\s+"") data.columns = [ ""year"", ""month"", ""day"", ""latitude"", ""longitude"", ""depth_km"", ""magnitude"", ] return data " 23222,"def _import_by_name(name: str, last_errors = None) -> Tuple[Any, Any, str]: """"""Import a Python object given its full name."""""" try: name_parts = name.split('.') # try first interpret `name` as MODNAME.OBJ modname = '.'.join(name_parts[:-1]) if modname: try: mod = import_module(modname) return getattr(mod, name_parts[-1]), mod, modname except (ImportError, IndexError, AttributeError) as e: if last_errors is not None: last_errors.append(str(str(e.args[0]))) pass # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ... last_j = 0 modname = None for j in reversed(range(1, len(name_parts) + 1)): last_j = j modname = '.'.join(name_parts[:j]) try: import_module(modname) except ImportError as e: if last_errors is not None: last_errors.append(str(e.args[0])) continue if modname in sys.modules: break if last_j < len(name_parts): parent = None obj = sys.modules[modname] for obj_name in name_parts[last_j:]: parent = obj obj = getattr(obj, obj_name) return obj, parent, modname else: return sys.modules[modname], None, modname except (ValueError, ImportError, AttributeError, KeyError) as e: raise ImportError(*e.args) from e ","def _import_by_name(name: str, last_errors = None) -> Tuple[Any, Any, str]: """"""Import a Python object given its full name."""""" try: name_parts = name.split('.') # try first interpret `name` as MODNAME.OBJ modname = '.'.join(name_parts[:-1]) if modname: try: mod = import_module(modname) return getattr(mod, name_parts[-1]), mod, modname except (ImportError, IndexError, AttributeError) as e: if last_errors is not None: last_errors.append(str(e)) pass # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ... 
last_j = 0 modname = None for j in reversed(range(1, len(name_parts) + 1)): last_j = j modname = '.'.join(name_parts[:j]) try: import_module(modname) except ImportError as e: if last_errors is not None: last_errors.append(str(e.args[0])) continue if modname in sys.modules: break if last_j < len(name_parts): parent = None obj = sys.modules[modname] for obj_name in name_parts[last_j:]: parent = obj obj = getattr(obj, obj_name) return obj, parent, modname else: return sys.modules[modname], None, modname except (ValueError, ImportError, AttributeError, KeyError) as e: raise ImportError(*e.args) from e " 6955,"def setup_user_email_inbox(email_account, awaiting_password, email_id, enable_outgoing): """""" setup email inbox for user """""" from frappe.core.doctype.user.user import ask_pass_update def add_user_email(user): user = frappe.get_doc(""User"", user) row = user.append(""user_emails"", {}) row.email_id = email_id row.email_account = email_account row.awaiting_password = awaiting_password or 0 row.enable_outgoing = enable_outgoing or 0 user.save(ignore_permissions=True) update_user_email_settings = False if not all([email_account, email_id]): return user_names = frappe.db.get_values(""User"", {""email"": email_id}, as_dict=True) if not user_names: return for user in user_names: user_name = user.get(""name"") # check if inbox is alreay configured user_inbox = frappe.db.get_value(""User Email"", { ""email_account"": email_account, ""parent"": user_name }, [""name""]) or None if not user_inbox: add_user_email(user_name) else: # update awaiting password for email account update_user_email_settings = True if update_user_email_settings: UserEmail = frappe.qb.DocType(""User Email"") frappe.qb.update(UserEmail) \ .set(UserEmail.awaiting_password, (awaiting_password or 0)) \ .set(UserEmail.enable_outgoing, enable_outgoing) \ .where(UserEmail.email_account == email_account).run(debug=1) else: users = "" and "".join([frappe.bold(user.get(""name"")) for user in user_names]) frappe.msgprint(_(""Enabled email inbox for user {0}"").format(users)) ask_pass_update() ","def setup_user_email_inbox(email_account, awaiting_password, email_id, enable_outgoing): """""" setup email inbox for user """""" from frappe.core.doctype.user.user import ask_pass_update def add_user_email(user): user = frappe.get_doc(""User"", user) row = user.append(""user_emails"", {}) row.email_id = email_id row.email_account = email_account row.awaiting_password = awaiting_password or 0 row.enable_outgoing = enable_outgoing or 0 user.save(ignore_permissions=True) update_user_email_settings = False if not all([email_account, email_id]): return user_names = frappe.db.get_values(""User"", {""email"": email_id}, as_dict=True) if not user_names: return for user in user_names: user_name = user.get(""name"") # check if inbox is alreay configured user_inbox = frappe.db.get_value(""User Email"", { ""email_account"": email_account, ""parent"": user_name }, [""name""]) or None if not user_inbox: add_user_email(user_name) else: # update awaiting password for email account update_user_email_settings = True if update_user_email_settings: UserEmail = frappe.qb.DocType(""User Email"") frappe.qb.update(UserEmail) \ .set(UserEmail.awaiting_password, (awaiting_password or 0)) \ .set(UserEmail.enable_outgoing, enable_outgoing) \ .where(UserEmail.email_account == email_account).run() else: users = "" and "".join([frappe.bold(user.get(""name"")) for user in user_names]) frappe.msgprint(_(""Enabled email inbox for user {0}"").format(users)) ask_pass_update() 
" 1191,"def is_ndarray_of_int_or_bool(obj): return (isinstance(obj, np.ndarray) and (np.issubdtype(obj.dtype, np.integer) or np.issubdtype(obj.dtype, np.dtype(bool).type))) ","def is_ndarray_of_int_or_bool(obj): return (isinstance(obj, np.ndarray) and (np.issubdtype(obj.dtype, np.integer) or np.issubdtype(obj.dtype, np.bool_))) " 40550,"def app_update(cmd, client, resource_group, service, name, assign_endpoint=None, deployment=None, runtime_version=None, jvm_options=None, main_entry=None, env=None, enable_persistent_storage=None, https_only=None, enable_end_to_end_tls=None, loaded_public_cert_file=None): client = get_mgmt_service_client(cmd.cli_ctx, AppPlatformManagementClient_20210901preview) _check_active_deployment_exist(client, resource_group, service, name) resource = client.services.get(resource_group, service) location = resource.location properties = models_20210901preview.AppResourceProperties(public=assign_endpoint, https_only=https_only, enable_end_to_end_tls=enable_end_to_end_tls) if enable_persistent_storage is True: properties.persistent_disk = models_20210901preview.PersistentDisk( size_in_gb=_get_persistent_disk_size(resource.sku.tier), mount_path=""/persistent"") if enable_persistent_storage is False: properties.persistent_disk = models_20210901preview.PersistentDisk(size_in_gb=0) if loaded_public_cert_file is not None and os.path.isfile(loaded_public_cert_file): data = get_file_json(loaded_public_cert_file, throw_on_empty=False) loaded_certificates = [] if data: for item in data['loadedCertificates']: certificate_resource = client.certificates.get(resource_group, service, item['certificateName']) loaded_certificates.append(models_20210901preview. LoadedCertificate(resource_id=certificate_resource.id, load_trust_store=item['loadTrustStore'])) properties.loaded_certificates = loaded_certificates app_resource = models_20210901preview.AppResource() app_resource.properties = properties app_resource.location = location logger.warning(""[1/2] updating app '{}'"".format(name)) poller = client.apps.begin_update( resource_group, service, name, app_resource) while poller.done() is False: sleep(APP_CREATE_OR_UPDATE_SLEEP_INTERVAL) app_updated = client.apps.get(resource_group, service, name) if deployment is None: logger.warning( ""No '--deployment' given, will update app's production deployment"") deployment = client.apps.get( resource_group, service, name).properties.active_deployment_name if deployment is None: logger.warning(""No production deployment found for update"") return app_updated logger.warning(""[2/2] Updating deployment '{}'"".format(deployment)) deployment_settings = models_20210901preview.DeploymentSettings( environment_variables=env, jvm_options=jvm_options, net_core_main_entry_path=main_entry, runtime_version=runtime_version, ) deployment_settings.cpu = None deployment_settings.memory_in_gb = None properties = models_20210901preview.DeploymentResourceProperties( deployment_settings=deployment_settings) deployment_resource = models.DeploymentResource(properties=properties) poller = client.deployments.begin_update( resource_group, service, name, deployment, deployment_resource) while poller.done() is False: sleep(DEPLOYMENT_CREATE_OR_UPDATE_SLEEP_INTERVAL) deployment = client.deployments.get( resource_group, service, name, deployment) app_updated.properties.active_deployment = deployment return app_updated ","def app_update(cmd, client, resource_group, service, name, assign_endpoint=None, deployment=None, runtime_version=None, jvm_options=None, main_entry=None, 
env=None, enable_persistent_storage=None, https_only=None, enable_end_to_end_tls=None, loaded_public_cert_file=None): client = get_mgmt_service_client(cmd.cli_ctx, AppPlatformManagementClient_20210901preview) _check_active_deployment_exist(client, resource_group, service, name) resource = client.services.get(resource_group, service) location = resource.location properties = models_20210901preview.AppResourceProperties(public=assign_endpoint, https_only=https_only, enable_end_to_end_tls=enable_end_to_end_tls) if enable_persistent_storage is True: properties.persistent_disk = models_20210901preview.PersistentDisk( size_in_gb=_get_persistent_disk_size(resource.sku.tier), mount_path=""/persistent"") if enable_persistent_storage is False: properties.persistent_disk = models_20210901preview.PersistentDisk(size_in_gb=0) if loaded_public_cert_file is not None and os.path.isfile(loaded_public_cert_file): data = get_file_json(loaded_public_cert_file, throw_on_empty=False) loaded_certificates = [] if data: for item in data['loadedCertificates']: certificate_resource = client.certificates.get(resource_group, service, item['certificateName']) loaded_certificates.append(models_20210901preview. LoadedCertificate(resource_id=certificate_resource.id, load_trust_store=item['loadTrustStore'])) properties.loaded_certificates = loaded_certificates app_resource = models_20210901preview.AppResource() app_resource.properties = properties app_resource.location = location logger.warning(""[1/2] updating app '{}'"".format(name)) poller = client.apps.begin_update( resource_group, service, name, app_resource) while poller.done() is False: sleep(APP_CREATE_OR_UPDATE_SLEEP_INTERVAL) app_updated = client.apps.get(resource_group, service, name) if deployment is None: logger.warning( ""No '--deployment' given, will update app's production deployment"") deployment = client.apps.get( resource_group, service, name).properties.active_deployment_name if deployment is None: logger.warning(""No production deployment found for update"") return app_updated logger.warning(""[2/2] Updating deployment '{}'"".format(deployment)) deployment_settings = models_20210901preview.DeploymentSettings( environment_variables=env, jvm_options=jvm_options, net_core_main_entry_path=main_entry, runtime_version=runtime_version) deployment_settings.cpu = None deployment_settings.memory_in_gb = None properties = models_20210901preview.DeploymentResourceProperties( deployment_settings=deployment_settings) deployment_resource = models.DeploymentResource(properties=properties) poller = client.deployments.begin_update( resource_group, service, name, deployment, deployment_resource) while poller.done() is False: sleep(DEPLOYMENT_CREATE_OR_UPDATE_SLEEP_INTERVAL) deployment = client.deployments.get( resource_group, service, name, deployment) app_updated.properties.active_deployment = deployment return app_updated " 44070,"def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]: """""" Fragments a cut graph into a collection of subgraphs as well as returning the communication/quotient graph. Args: graph (MultiDiGraph): directed multigraph containing measure and prepare nodes at cut locations Returns: subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]): the subgraphs of the cut graph and the communication graph where each node represents a fragment and edges denote the flow of qubits between fragments **Example** Consider the following circuit with the manually-placed wire cuts: .. 
code-block:: python from pennylane.transforms import qcut wire_cut_0 = qml.WireCut(wires=0) wire_cut_1 = qml.WireCut(wires=1) multi_wire_cut = qml.WireCut(wires=[0, 1]) with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.apply(wire_cut_0) qml.RY(0.5, wires=0) qml.apply(wire_cut_1) qml.CNOT(wires=[0, 1]) qml.apply(multi_wire_cut) qml.RZ(0.6, wires=1) qml.expval(qml.PauliZ(0)) We can find the corresponding graph, remove all the wire cut nodes, and find the subgraphs and communication graph by using: >>> graph = qcut.tape_to_graph(tape) >>> qcut.replace_wire_cut_nodes(graph) >>> qcut.fragment_graph(graph) ((, , , ), ) """""" edges = list(graph.edges) cut_edges = [] for node1, node2, _ in edges: if isinstance(node1, MeasureNode): assert isinstance(node2, PrepareNode) cut_edges.append((node1, node2)) graph.remove_edge(node1, node2) subgraph_nodes = weakly_connected_components(graph) subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes) communication_graph = MultiDiGraph() communication_graph.add_nodes_from(range(len(subgraphs))) for node1, node2 in cut_edges: for i, subgraph in enumerate(subgraphs): if subgraph.has_node(node1): start_fragment = i if subgraph.has_node(node2): end_fragment = i communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2)) return subgraphs, communication_graph ","def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]: """""" Fragments a cut graph into a collection of subgraphs as well as returning the communication/quotient graph. Args: graph (MultiDiGraph): directed multigraph containing measure and prepare nodes at cut locations Returns: subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]): the subgraphs of the cut graph and the communication graph node represents a fragment and edges denote the flow of qubits between fragments **Example** Consider the following circuit with the manually-placed wire cuts: .. code-block:: python from pennylane.transforms import qcut wire_cut_0 = qml.WireCut(wires=0) wire_cut_1 = qml.WireCut(wires=1) multi_wire_cut = qml.WireCut(wires=[0, 1]) with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.apply(wire_cut_0) qml.RY(0.5, wires=0) qml.apply(wire_cut_1) qml.CNOT(wires=[0, 1]) qml.apply(multi_wire_cut) qml.RZ(0.6, wires=1) qml.expval(qml.PauliZ(0)) We can find the corresponding graph, remove all the wire cut nodes, and find the subgraphs and communication graph by using: >>> graph = qcut.tape_to_graph(tape) >>> qcut.replace_wire_cut_nodes(graph) >>> qcut.fragment_graph(graph) ((, , , ), ) """""" edges = list(graph.edges) cut_edges = [] for node1, node2, _ in edges: if isinstance(node1, MeasureNode): assert isinstance(node2, PrepareNode) cut_edges.append((node1, node2)) graph.remove_edge(node1, node2) subgraph_nodes = weakly_connected_components(graph) subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes) communication_graph = MultiDiGraph() communication_graph.add_nodes_from(range(len(subgraphs))) for node1, node2 in cut_edges: for i, subgraph in enumerate(subgraphs): if subgraph.has_node(node1): start_fragment = i if subgraph.has_node(node2): end_fragment = i communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2)) return subgraphs, communication_graph " 40211,"def extend_line(line, start_extension=0, end_extension=0): """"""Extend the given line from one end or the other, or both, depending on the given values Parameters ---------- line : tuple Two points defining the line. 
start_extension : float The extension distance at the start of the line as float. end_extension : float The extension distance at the end of the line as float. Returns ------- extended line : tuple Two points defining the offset line. Examples -------- >>> line = Line([0.0,0.0,0.0],[1.0,0.0,0.0]) >>> extended_line = extend_line(line, 1, 1) Line([-1.0,0.0,0.0],[2.0,0.0,0.0]) """""" def calculate_translation(line, distance): vector = line.direction.copy() vector.scale(distance) return Translation(vector) if start_extension != 0: translation = calculate_translation(line, -start_extension) line.start.transform(translation) if end_extension != 0: translation = calculate_translation(line, end_extension) line.end.transform(translation) return line ","def extend_line(line, start_extension=0, end_extension=0): """"""Extend the given line from one end or the other, or both, depending on the given values Parameters ---------- line : tuple Two points defining the line. start_extension : float The extension distance at the start of the line as float. end_extension : float The extension distance at the end of the line as float. Returns ------- extended line : tuple Two points defining the offset line. Examples -------- >>> line = Line([0.0,0.0,0.0],[1.0,0.0,0.0]) >>> extended_line = extend_line(line, 1, 1) Line([-1.0, 0.0, 0.0], [2.0, 0.0, 0.0]) """""" def calculate_translation(line, distance): vector = line.direction.copy() vector.scale(distance) return Translation(vector) if start_extension != 0: translation = calculate_translation(line, -start_extension) line.start.transform(translation) if end_extension != 0: translation = calculate_translation(line, end_extension) line.end.transform(translation) return line " 59391,"def test_nested_config(): nested_config = {""foo"": {""bar"": 1}} run = wandb.init(mode=""offline"", config=nested_config, config_unnest=True) assert run.config.as_dict() == {""foo.bar"": 1} assert run.config_nested == nested_config ","def test_nested_config(test_settings): test_settings.update(mode=""offline"") nested_config = {""foo"": {""bar"": 1}} run = wandb.init(settings=test_settings, config=nested_config, config_unnest=True) assert run.config.as_dict() == {""foo.bar"": 1} assert run.config_nested == nested_config run.finish() " 4497,"def test_ajd(): """"""Test approximate joint diagonalization."""""" # The implementation shuold obtain the same # results as the Matlab implementation by Pham Dinh-Tuan. # Generate a set of cavariances matrices for test purpose n_times, n_channels = 10, 3 seed = np.random.RandomState(0) diags = 2.0 + 0.1 * seed.randn(n_times, n_channels) A = 2 * seed.rand(n_channels, n_channels) - 1 A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T covmats = np.empty((n_times, n_channels, n_channels)) for i in range(n_times): covmats[i] = np.linalg.multi_dot([A, np.diag(diags[i]), A.T]) V, D = _ajd_pham(covmats) # Results obtained with original matlab implementation V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574], [0.694689013234610, 0.775690358505945, -1.162043086446043], [-0.592603135588066, -0.598996925696260, 1.009550086271192]] assert_array_almost_equal(V, V_matlab) ","def test_ajd(): """"""Test approximate joint diagonalization."""""" # The implementation shuold obtain the same # results as the Matlab implementation by Pham Dinh-Tuan. 
# Generate a set of cavariances matrices for test purpose n_times, n_channels = 10, 3 seed = np.random.RandomState(0) diags = 2.0 + 0.1 * seed.randn(n_times, n_channels) A = 2 * seed.rand(n_channels, n_channels) - 1 A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T covmats = np.empty((n_times, n_channels, n_channels)) for i in range(n_times): np.linalg.multi_dot([A, np.diag(diags[i]), A.T], out=covmats[i]) V, D = _ajd_pham(covmats) # Results obtained with original matlab implementation V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574], [0.694689013234610, 0.775690358505945, -1.162043086446043], [-0.592603135588066, -0.598996925696260, 1.009550086271192]] assert_array_almost_equal(V, V_matlab) " 25624,"def test_leaf_components_combination_kernel(): """""" Regression test for issue #1066 """""" k = gpflow.kernels.SquaredExponential() + gpflow.kernels.SquaredExponential() assert leaf_components(k), ""Combination kernel should have non-empty leaf components"" ","def test_leaf_components_combination_kernel(): """""" Regression test for kernel compositions - output for printing should not be empty (issue #1066). """""" k = gpflow.kernels.SquaredExponential() + gpflow.kernels.SquaredExponential() assert leaf_components(k), ""Combination kernel should have non-empty leaf components"" " 36796,"def parse_coin_args(args: str) -> tuple[CoinsArgs, bool]: """""" Parses a user's coin string into a representation of each currency. If the user input is a decimal number, assumes gold pieces. Otherwise, allows the user to specify currencies in the form ``/(([+-]?\d+)\s*([pgesc]p)?)+/`` (e.g. +1gp -2sp 3cp). """""" try: return _parse_coin_args_float(float(args)), False except ValueError: return _parse_coin_args_re(args), True ","def parse_coin_args(args: str) -> Tuple[CoinsArgs, bool]: """""" Parses a user's coin string into a representation of each currency. If the user input is a decimal number, assumes gold pieces. Otherwise, allows the user to specify currencies in the form ``/(([+-]?\d+)\s*([pgesc]p)?)+/`` (e.g. +1gp -2sp 3cp). """""" try: return _parse_coin_args_float(float(args)), False except ValueError: return _parse_coin_args_re(args), True " 33788,"def make_fastapi_class_based_view(fastapi_app, cls: Type) -> None: """"""Transform the `cls`'s methods and class annotations to FastAPI routes. Modified from https://github.com/dmontagu/fastapi-utils/blob/master/fastapi_utils/cbv.py Usage: >>> app = FastAPI() >>> class A: @app.route(""/{i}"") def func(self, i: int) -> str: return self.dep + i >>> # just running the app won't work, here. >>> make_fastapi_class_based_view(app, A) >>> # now app can be run properly """""" # Delayed import to prevent ciruclar imports in workers. from fastapi import Depends, APIRouter from fastapi.routing import APIRoute def get_current_servable_instance(): from ray import serve return serve.get_replica_context().servable_object # Find all the class method routes member_methods = { func for _, func in inspect.getmembers(cls, inspect.isfunction) } class_method_routes = [ route for route in fastapi_app.routes if isinstance(route, APIRoute) and route.endpoint in member_methods ] # Modify these routes and mount it to a new APIRouter. # We need to to this (instead of modifying in place) because we want to use # the laster fastapi_app.include_router to re-run the dependency analysis # for each routes. 
new_router = APIRouter() for route in class_method_routes: fastapi_app.routes.remove(route) # This block just adds a default values to the self parameters so that # FastAPI knows to inject the object when calling the route. # Before: def method(self, i): ... # After: def method(self=Depends(...), *, i):... old_endpoint = route.endpoint old_signature = inspect.signature(old_endpoint) old_parameters = list(old_signature.parameters.values()) if len(old_parameters) == 0: # TODO(simon): make it more flexible to support no arguments. raise RayServeException( ""Methods in FastAPI class based view must have ``self`` as "" ""first argument."") old_self_parameter = old_parameters[0] new_self_parameter = old_self_parameter.replace( default=Depends(get_current_servable_instance)) new_parameters = [new_self_parameter] + [ # Make the rest of the parameters keyword only because # the first argument is no longer positional. parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY) for parameter in old_parameters[1:] ] new_signature = old_signature.replace(parameters=new_parameters) setattr(route.endpoint, ""__signature__"", new_signature) setattr(route.endpoint, ""_serve_cls"", cls) new_router.routes.append(route) fastapi_app.include_router(new_router) # Remove endpoints that belong to other class based views. routes = fastapi_app.routes for route in routes: serve_cls = getattr(route.endpoint, ""_serve_cls"", None) if serve_cls is not None and serve_cls != cls: routes.remove(route) ","def make_fastapi_class_based_view(fastapi_app, cls: Type) -> None: """"""Transform the `cls`'s methods and class annotations to FastAPI routes. Modified from https://github.com/dmontagu/fastapi-utils/blob/master/fastapi_utils/cbv.py Usage: >>> app = FastAPI() >>> class A: @app.route(""/{i}"") def func(self, i: int) -> str: return self.dep + i >>> # just running the app won't work, here. >>> make_fastapi_class_based_view(app, A) >>> # now app can be run properly """""" # Delayed import to prevent ciruclar imports in workers. from fastapi import Depends, APIRouter from fastapi.routing import APIRoute def get_current_servable_instance(): from ray import serve return serve.get_replica_context().servable_object # Find all the class method routes member_methods = { func for _, func in inspect.getmembers(cls, inspect.isfunction) } class_method_routes = [ route for route in fastapi_app.routes if isinstance(route, APIRoute) and route.endpoint in member_methods ] # Modify these routes and mount it to a new APIRouter. # We need to to this (instead of modifying in place) because we want to use # the laster fastapi_app.include_router to re-run the dependency analysis # for each routes. new_router = APIRouter() for route in class_method_routes: fastapi_app.routes.remove(route) # This block just adds a default values to the self parameters so that # FastAPI knows to inject the object when calling the route. # Before: def method(self, i): ... # After: def method(self=Depends(...), *, i):... old_endpoint = route.endpoint old_signature = inspect.signature(old_endpoint) old_parameters = list(old_signature.parameters.values()) if len(old_parameters) == 0: # TODO(simon): make it more flexible to support no arguments. 
raise RayServeException( ""Methods in FastAPI class based view must have ``self`` as "" ""their first argument."") old_self_parameter = old_parameters[0] new_self_parameter = old_self_parameter.replace( default=Depends(get_current_servable_instance)) new_parameters = [new_self_parameter] + [ # Make the rest of the parameters keyword only because # the first argument is no longer positional. parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY) for parameter in old_parameters[1:] ] new_signature = old_signature.replace(parameters=new_parameters) setattr(route.endpoint, ""__signature__"", new_signature) setattr(route.endpoint, ""_serve_cls"", cls) new_router.routes.append(route) fastapi_app.include_router(new_router) # Remove endpoints that belong to other class based views. routes = fastapi_app.routes for route in routes: serve_cls = getattr(route.endpoint, ""_serve_cls"", None) if serve_cls is not None and serve_cls != cls: routes.remove(route) " 29348,"def replace_ids_with_names_in_dict(exploration_id): """"""Replace ids with the names and return rights as a dictionary. Args: exploration_id: str. ID of the exploration. Returns: dict. The dictionary where ids are replaced with corresponding names. """""" rights_dict = rights_manager.get_exploration_rights( exploration_id).to_dict() if rights_dict['community_owned']: rights_dict['owner_names'] = [] rights_dict['editor_names'] = [] rights_dict['voice_artist_names'] = [] rights_dict['viewer_names'] = [] else: rights_dict['owner_names'] = ( user_services.get_human_readable_user_ids( rights_dict['owner_ids'])) rights_dict['editor_names'] = ( user_services.get_human_readable_user_ids( rights_dict['editor_ids'])) rights_dict['voice_artist_names'] = ( user_services.get_human_readable_user_ids( rights_dict['voice_artist_ids'])) rights_dict['viewer_names'] = ( user_services.get_human_readable_user_ids( rights_dict['viewer_ids'])) rights_dict.pop('owner_ids') rights_dict.pop('editor_ids') rights_dict.pop('voice_artist_ids') rights_dict.pop('viewer_ids') return rights_dict ","def get_exploration_rights_with_names(exploration_id): """"""Replace ids with the names and return rights as a dictionary. Args: exploration_id: str. ID of the exploration. Returns: dict. The dictionary where ids are replaced with corresponding names. 
"""""" rights_dict = rights_manager.get_exploration_rights( exploration_id).to_dict() if rights_dict['community_owned']: rights_dict['owner_names'] = [] rights_dict['editor_names'] = [] rights_dict['voice_artist_names'] = [] rights_dict['viewer_names'] = [] else: rights_dict['owner_names'] = ( user_services.get_human_readable_user_ids( rights_dict['owner_ids'])) rights_dict['editor_names'] = ( user_services.get_human_readable_user_ids( rights_dict['editor_ids'])) rights_dict['voice_artist_names'] = ( user_services.get_human_readable_user_ids( rights_dict['voice_artist_ids'])) rights_dict['viewer_names'] = ( user_services.get_human_readable_user_ids( rights_dict['viewer_ids'])) rights_dict.pop('owner_ids') rights_dict.pop('editor_ids') rights_dict.pop('voice_artist_ids') rights_dict.pop('viewer_ids') return rights_dict " 46541,"def build_spec(version: str, source_files: List[str]) -> str: all_spescs = [get_spec(spec) for spec in source_files] spec_object = all_spescs[0] for value in all_spescs[1:]: spec_object = combine_spec_objects(spec_object, value) dependency_order_spec(spec_object) return objects_to_spec(spec_object, version_imports[version], version) ","def build_spec(version: str, source_files: List[str]) -> str: all_spescs = [get_spec(spec) for spec in source_files] spec_object = all_specs[0] for value in all_spescs[1:]: spec_object = combine_spec_objects(spec_object, value) dependency_order_spec(spec_object) return objects_to_spec(spec_object, version_imports[version], version) " 43177,"def train(model, optim, virtual_pipeline_model_parallel_size, pipeline_model_parallel_size): sequence_len = global_vars.get_args().seq_length micro_batch_size = global_vars.get_args().micro_batch_size hidden_size = global_vars.get_args().hidden_size forward_backward_func = get_forward_backward_func(virtual_pipeline_model_parallel_size, pipeline_model_parallel_size) tensor_shape = (args.seq_length, args.micro_batch_size, args.hidden_size) for i in range(16): batch = generate_fancy_data_labels(sequence_len, batch_size) optim.zero_grad() forward_backward_func(fwd_step_func, batch, model, forward_only=False, tensor_shape=tensor_shape) optim.step() ","def train(model, optim, virtual_pipeline_model_parallel_size, pipeline_model_parallel_size): sequence_len = global_vars.get_args().seq_length micro_batch_size = global_vars.get_args().micro_batch_size hidden_size = global_vars.get_args().hidden_size forward_backward_func = get_forward_backward_func(virtual_pipeline_model_parallel_size, pipeline_model_parallel_size) tensor_shape = (args.seq_length, args.micro_batch_size, args.hidden_size) for _ in range(16): batch = generate_fancy_data_labels(sequence_len, batch_size) optim.zero_grad() forward_backward_func(fwd_step_func, batch, model, forward_only=False, tensor_shape=tensor_shape) optim.step() " 54445,"def test_fanova_importance_evaluator_max_depth() -> None: # Assumes that `seed` can be fixed to reproduce identical results. study = create_study() study.optimize(objective, n_trials=3) evaluator = FanovaImportanceEvaluator(max_depth=1, seed=0) param_importance = evaluator.evaluate(study) evaluator = FanovaImportanceEvaluator(max_depth=2, seed=0) param_importance_different_n_trees = evaluator.evaluate(study) assert param_importance != param_importance_different_n_trees ","def test_fanova_importance_evaluator_max_depth() -> None: # Assumes that `seed` can be fixed to reproduce identical results. 
study = create_study() study.optimize(objective, n_trials=3) evaluator = FanovaImportanceEvaluator(max_depth=1, seed=0) param_importance = evaluator.evaluate(study) evaluator = FanovaImportanceEvaluator(max_depth=2, seed=0) param_importance_different_max_depth = evaluator.evaluate(study) assert param_importance != param_importance_different_max_depth " 8307,"def _retry_if_failures_are_restorable(remote, args): # wait at most 5 minutes with safe_while(sleep=10, tries=30) as proceed: while proceed(): stdout = StringIO() stderr = StringIO() try: return remote.run(args=args, stderr=stderr, stdout=stdout) except run.CommandFailedError: if ""status code: 503"" in stdout.getvalue().lower(): continue if ""failed to download metadata for repo"" in stderr.getvalue().lower(): continue else: raise ","def _retry_if_failures_are_recoverable(remote, args): # wait at most 5 minutes with safe_while(sleep=10, tries=30) as proceed: while proceed(): stdout = StringIO() stderr = StringIO() try: return remote.run(args=args, stderr=stderr, stdout=stdout) except run.CommandFailedError: if ""status code: 503"" in stdout.getvalue().lower(): continue if ""failed to download metadata for repo"" in stderr.getvalue().lower(): continue else: raise " 51520,"def email_block(email): _, domain = email.split(""@"", 1) if domain.lower() in blacklist or domain.lower() in settings.EMAIL_BLOCK_DOMAIN: abort(400, message=""Bad email address."") ","def require_allowed_email(email): # `example.com` and `example.com.` are equal - last dot stands for DNS root but usually is omitted _, domain = email.lower().rstrip(""."").split(""@"", 1) if domain in blacklist or domain in settings.BLOCKED_DOMAINS: abort(400, message=""Bad email address."") " 31075,"def fetch_indicators(client: TaxiiClient, tlp_color: Optional[str] = None, hours_back: str = '24 hours', tags: Optional[List[str]] = None): time_field = 'time' end = datetime.utcnow() start = demisto.getLastRun().get(time_field) if start is None: start = (end - timedelta(hours=get_first_fetch(hours_back))).strftime(TIME_FORMAT) end = end.strftime(TIME_FORMAT) data = client.poll_request(start, end) try: demisto.createIndicators(list(Indicators.indicators_from_data(data, tlp_color, tags))) demisto.setLastRun({time_field: end}) except EmptyData: pass ","def fetch_indicators(client: TaxiiClient, tlp_color: Optional[str] = None, hours_back: str = '24 hours', tags: Optional[List[str]] = None): time_field = 'time' end = datetime.utcnow() start = demisto.getLastRun().get(time_field) if start is None: start = dateparser.parse(hours_back).strftime(TIME_FORMAT) end = end.strftime(TIME_FORMAT) data = client.poll_request(start, end) try: demisto.createIndicators(list(Indicators.indicators_from_data(data, tlp_color, tags))) demisto.setLastRun({time_field: end}) except EmptyData: pass " 5422,"def test__api_decrypt(encrypted_data_key): # pylint: disable=no-self-use """""" _api_decrypt_response calls kms.decrypt with the configured data key as the CiphertextBlob kwarg. """""" kms_client = MagicMock() with patch.object(aws_kms, ""_kms"") as kms_getter: kms_getter.return_value = kms_client with patch.object(aws_kms, ""_cfg_data_key"", lambda: encrypted_data_key): aws_kms._api_decrypt() kms_client.decrypt.assert_called_with( CiphertextBlob=encrypted_data_key ) # pylint: disable=no-member ","def test__api_decrypt(encrypted_data_key): """""" _api_decrypt_response calls kms.decrypt with the configured data key as the CiphertextBlob kwarg. 
"""""" kms_client = MagicMock() with patch.object(aws_kms, ""_kms"") as kms_getter: kms_getter.return_value = kms_client with patch.object(aws_kms, ""_cfg_data_key"", lambda: encrypted_data_key): aws_kms._api_decrypt() kms_client.decrypt.assert_called_with( CiphertextBlob=encrypted_data_key ) # pylint: disable=no-member " 19840,"def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, broadcast=True, parameters=None, to_xarray=True, phase_records=None, **kwargs): """""" Sample the property surface of 'output' containing the specified components and phases. Model parameters are taken from 'dbf' and any state variables (T, P, etc.) can be specified as keyword arguments. Parameters ---------- dbf : Database Thermodynamic database containing the relevant parameters. comps : str or sequence Names of components to consider in the calculation. phases : str or sequence Names of phases to consider in the calculation. mode : string, optional See 'make_callable' docstring for details. output : string, optional Model attribute to sample. fake_points : bool, optional (Default: False) If True, the first few points of the output surface will be fictitious points used to define an equilibrium hyperplane guaranteed to be above all the other points. This is used for convex hull computations. broadcast : bool, optional If True, broadcast given state variable lists against each other to create a grid. If False, assume state variables are given as equal-length lists. points : ndarray or a dict of phase names to ndarray, optional Columns of ndarrays must be internal degrees of freedom (site fractions), sorted. If this is not specified, points will be generated automatically. pdens : int, a dict of phase names to int, or a seq of both, optional Number of points to sample per degree of freedom. Default: 2000; Default when called from equilibrium(): 500 model : Model, a dict of phase names to Model, or a seq of both, optional Model class to use for each phase. sampler : callable, a dict of phase names to callable, or a seq of both, optional Function to sample phase constitution space. Must have same signature as 'pycalphad.core.utils.point_sample' grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True) Whether to add evenly spaced points between end-members. The density of points is determined by 'pdens' parameters : dict, optional Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database. phase_records : Optional[Mapping[str, PhaseRecord]] Mapping of phase names to PhaseRecord objects. Must include all active phases. The `model` argument must be a mapping of phase names to instances of Model objects. Callers must take care that the PhaseRecord objects were created with the same `output` as passed to `calculate`. Returns ------- Dataset of the sampled attribute as a function of state variables Examples -------- None yet. 
"""""" # Here we check for any keyword arguments that are special, i.e., # there may be keyword arguments that aren't state variables pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000) points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None) callables = kwargs.pop('callables', {}) sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None) fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True) model = kwargs.pop('model', None) parameters = parameters or dict() if isinstance(parameters, dict): parameters = OrderedDict(sorted(parameters.items(), key=str)) if isinstance(phases, str): phases = [phases] if isinstance(comps, (str, v.Species)): comps = [comps] comps = sorted(unpack_components(dbf, comps)) if points_dict is None and broadcast is False: raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.') nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0] desired_active_pure_elements = [list(x.constituents.keys()) for x in comps] desired_active_pure_elements = [el.upper() for constituents in desired_active_pure_elements for el in constituents] nonvacant_elements = sorted([x for x in set(desired_active_pure_elements) if x != 'VA']) all_phase_data = [] largest_energy = 1e10 # Consider only the active phases list_of_possible_phases = filter_phases(dbf, comps) if len(list_of_possible_phases) == 0: raise ConditionError('There are no phases in the Database that can be active with components {0}'.format(comps)) active_phases = filter_phases(dbf, comps, phases) if len(active_phases) == 0: raise ConditionError('None of the passed phases ({0}) are active. List of possible phases: {1}.'.format(phases, list_of_possible_phases)) if isinstance(output, (list, tuple, set)): raise NotImplementedError('Only one property can be specified in calculate() at a time') output = output if output is not None else 'GM' # Implicitly add 'N' state variable as a string to keyword arguements if it's not passed if kwargs.get('N') is None: kwargs['N'] = 1 if np.any(np.array(kwargs['N']) != 1): raise ConditionError('N!=1 is not yet supported, got N={}'.format(kwargs['N'])) # TODO: conditions dict of StateVariable instances should become part of the calculate API statevar_strings = [sv for sv in kwargs.keys() if getattr(v, sv) is not None] # If we don't do this, sympy will get confused during substitution statevar_dict = dict((v.StateVariable(key), unpack_condition(value)) for key, value in kwargs.items() if key in statevar_strings) # Sort after default state variable check to fix gh-116 statevar_dict = OrderedDict(sorted(statevar_dict.items(), key=lambda x: str(x[0]))) str_statevar_dict = OrderedDict((str(key), unpack_condition(value)) for (key, value) in statevar_dict.items()) # Build phase records if they weren't passed if phase_records is None: models = instantiate_models(dbf, comps, active_phases, model=model, parameters=parameters) phase_records = build_phase_records(dbf, comps, active_phases, statevar_dict, models=models, parameters=parameters, output=output, callables=callables, build_gradients=False, build_hessians=False, verbose=kwargs.pop('verbose', False)) else: # phase_records were provided, instantiated models must also be provided by the caller models = model if not isinstance(models, Mapping): raise ValueError(""A dictionary of instantiated models must be passed to `equilibrium` with the `model` argument if the `phase_records` argument is used."") 
active_phases_without_models = [name for name in active_phases if not isinstance(models.get(name), Model)] active_phases_without_phase_records = [name for name in active_phases if not isinstance(phase_records.get(name), PhaseRecord)] if len(active_phases_without_phase_records) > 0: raise ValueError(f""phase_records must contain a PhaseRecord instance for every active phase. Missing PhaseRecord objects for {sorted(active_phases_without_phase_records)}"") if len(active_phases_without_models) > 0: raise ValueError(f""model must contain a Model instance for every active phase. Missing Model objects for {sorted(active_phases_without_models)}"") maximum_internal_dof = max(len(models[phase_name].site_fractions) for phase_name in active_phases) for phase_name in sorted(active_phases): mod = models[phase_name] phase_record = phase_records[phase_name] points = points_dict[phase_name] if points is None: points = _sample_phase_constitution(mod, sampler_dict[phase_name] or point_sample, fixedgrid_dict[phase_name], pdens_dict[phase_name]) points = np.atleast_2d(points) fp = fake_points and (phase_name == sorted(active_phases)[0]) phase_ds = _compute_phase_values(nonvacant_components, str_statevar_dict, points, phase_record, output, maximum_internal_dof, broadcast=broadcast, parameters=parameters, largest_energy=float(largest_energy), fake_points=fp) all_phase_data.append(phase_ds) fp_offset = len(nonvacant_elements) if fake_points else 0 running_total = [fp_offset] + list(np.cumsum([phase_ds['X'].shape[-2] for phase_ds in all_phase_data])) islice_by_phase = {phase_name: slice(running_total[phase_idx], running_total[phase_idx+1], None) for phase_idx, phase_name in enumerate(sorted(active_phases))} # speedup for single-phase case (found by profiling) if len(all_phase_data) > 1: concatenated_coords = all_phase_data[0].coords data_vars = all_phase_data[0].data_vars concatenated_data_vars = {} for var in data_vars.keys(): data_coords = data_vars[var][0] points_idx = data_coords.index('points') # concatenation axis arrs = [] for phase_data in all_phase_data: arrs.append(getattr(phase_data, var)) concat_data = np.concatenate(arrs, axis=points_idx) concatenated_data_vars[var] = (data_coords, concat_data) final_ds = LightDataset(data_vars=concatenated_data_vars, coords=concatenated_coords) else: final_ds = all_phase_data[0] final_ds.attrs['phase_indices'] = islice_by_phase if to_xarray: return final_ds.get_dataset() else: return final_ds ","def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, broadcast=True, parameters=None, to_xarray=True, phase_records=None, **kwargs): """""" Sample the property surface of 'output' containing the specified components and phases. Model parameters are taken from 'dbf' and any state variables (T, P, etc.) can be specified as keyword arguments. Parameters ---------- dbf : Database Thermodynamic database containing the relevant parameters. comps : str or sequence Names of components to consider in the calculation. phases : str or sequence Names of phases to consider in the calculation. mode : string, optional See 'make_callable' docstring for details. output : string, optional Model attribute to sample. fake_points : bool, optional (Default: False) If True, the first few points of the output surface will be fictitious points used to define an equilibrium hyperplane guaranteed to be above all the other points. This is used for convex hull computations. 
broadcast : bool, optional If True, broadcast given state variable lists against each other to create a grid. If False, assume state variables are given as equal-length lists. points : ndarray or a dict of phase names to ndarray, optional Columns of ndarrays must be internal degrees of freedom (site fractions), sorted. If this is not specified, points will be generated automatically. pdens : int, a dict of phase names to int, or a seq of both, optional Number of points to sample per degree of freedom. Default: 2000; Default when called from equilibrium(): 500 model : Model, a dict of phase names to Model, or a seq of both, optional Model class to use for each phase. sampler : callable, a dict of phase names to callable, or a seq of both, optional Function to sample phase constitution space. Must have same signature as 'pycalphad.core.utils.point_sample' grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True) Whether to add evenly spaced points between end-members. The density of points is determined by 'pdens' parameters : dict, optional Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database. phase_records : Optional[Mapping[str, PhaseRecord]] Mapping of phase names to PhaseRecord objects. Must include all active phases. The `model` argument must be a mapping of phase names to instances of Model objects. Callers must take care that the PhaseRecord objects were created with the same `output` as passed to `calculate`. Returns ------- Dataset of the sampled attribute as a function of state variables Examples -------- None yet. """""" # Here we check for any keyword arguments that are special, i.e., # there may be keyword arguments that aren't state variables pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000) points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None) callables = kwargs.pop('callables', {}) sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None) fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True) model = kwargs.pop('model', None) parameters = parameters or dict() if isinstance(parameters, dict): parameters = OrderedDict(sorted(parameters.items(), key=str)) if isinstance(phases, str): phases = [phases] if isinstance(comps, (str, v.Species)): comps = [comps] comps = sorted(unpack_components(dbf, comps)) if points_dict is None and broadcast is False: raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.') nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0] nonvacant_elements = get_pure_elements(dbf, comps) all_phase_data = [] largest_energy = 1e10 # Consider only the active phases list_of_possible_phases = filter_phases(dbf, comps) if len(list_of_possible_phases) == 0: raise ConditionError('There are no phases in the Database that can be active with components {0}'.format(comps)) active_phases = filter_phases(dbf, comps, phases) if len(active_phases) == 0: raise ConditionError('None of the passed phases ({0}) are active. 
List of possible phases: {1}.'.format(phases, list_of_possible_phases)) if isinstance(output, (list, tuple, set)): raise NotImplementedError('Only one property can be specified in calculate() at a time') output = output if output is not None else 'GM' # Implicitly add 'N' state variable as a string to keyword arguements if it's not passed if kwargs.get('N') is None: kwargs['N'] = 1 if np.any(np.array(kwargs['N']) != 1): raise ConditionError('N!=1 is not yet supported, got N={}'.format(kwargs['N'])) # TODO: conditions dict of StateVariable instances should become part of the calculate API statevar_strings = [sv for sv in kwargs.keys() if getattr(v, sv) is not None] # If we don't do this, sympy will get confused during substitution statevar_dict = dict((v.StateVariable(key), unpack_condition(value)) for key, value in kwargs.items() if key in statevar_strings) # Sort after default state variable check to fix gh-116 statevar_dict = OrderedDict(sorted(statevar_dict.items(), key=lambda x: str(x[0]))) str_statevar_dict = OrderedDict((str(key), unpack_condition(value)) for (key, value) in statevar_dict.items()) # Build phase records if they weren't passed if phase_records is None: models = instantiate_models(dbf, comps, active_phases, model=model, parameters=parameters) phase_records = build_phase_records(dbf, comps, active_phases, statevar_dict, models=models, parameters=parameters, output=output, callables=callables, build_gradients=False, build_hessians=False, verbose=kwargs.pop('verbose', False)) else: # phase_records were provided, instantiated models must also be provided by the caller models = model if not isinstance(models, Mapping): raise ValueError(""A dictionary of instantiated models must be passed to `equilibrium` with the `model` argument if the `phase_records` argument is used."") active_phases_without_models = [name for name in active_phases if not isinstance(models.get(name), Model)] active_phases_without_phase_records = [name for name in active_phases if not isinstance(phase_records.get(name), PhaseRecord)] if len(active_phases_without_phase_records) > 0: raise ValueError(f""phase_records must contain a PhaseRecord instance for every active phase. Missing PhaseRecord objects for {sorted(active_phases_without_phase_records)}"") if len(active_phases_without_models) > 0: raise ValueError(f""model must contain a Model instance for every active phase. 
Missing Model objects for {sorted(active_phases_without_models)}"") maximum_internal_dof = max(len(models[phase_name].site_fractions) for phase_name in active_phases) for phase_name in sorted(active_phases): mod = models[phase_name] phase_record = phase_records[phase_name] points = points_dict[phase_name] if points is None: points = _sample_phase_constitution(mod, sampler_dict[phase_name] or point_sample, fixedgrid_dict[phase_name], pdens_dict[phase_name]) points = np.atleast_2d(points) fp = fake_points and (phase_name == sorted(active_phases)[0]) phase_ds = _compute_phase_values(nonvacant_components, str_statevar_dict, points, phase_record, output, maximum_internal_dof, broadcast=broadcast, parameters=parameters, largest_energy=float(largest_energy), fake_points=fp) all_phase_data.append(phase_ds) fp_offset = len(nonvacant_elements) if fake_points else 0 running_total = [fp_offset] + list(np.cumsum([phase_ds['X'].shape[-2] for phase_ds in all_phase_data])) islice_by_phase = {phase_name: slice(running_total[phase_idx], running_total[phase_idx+1], None) for phase_idx, phase_name in enumerate(sorted(active_phases))} # speedup for single-phase case (found by profiling) if len(all_phase_data) > 1: concatenated_coords = all_phase_data[0].coords data_vars = all_phase_data[0].data_vars concatenated_data_vars = {} for var in data_vars.keys(): data_coords = data_vars[var][0] points_idx = data_coords.index('points') # concatenation axis arrs = [] for phase_data in all_phase_data: arrs.append(getattr(phase_data, var)) concat_data = np.concatenate(arrs, axis=points_idx) concatenated_data_vars[var] = (data_coords, concat_data) final_ds = LightDataset(data_vars=concatenated_data_vars, coords=concatenated_coords) else: final_ds = all_phase_data[0] final_ds.attrs['phase_indices'] = islice_by_phase if to_xarray: return final_ds.get_dataset() else: return final_ds " 42858,"def graph_embed(mat, max_mean_photon=1.0, make_traceless=True, tol=6): r"""""" Given an symmetric adjacency matrix (that can be complex in general), it returns the squeezing parameter and interferometer necessary for implementing it in GBS. Args: mat (array): square symmetric complex (or real or integer) array max_mean_photon (float): threshold value. It guarantees that the mode with the largest squeezing has ``max_mean_photon`` as the mean photon number i.e. :math:`sinh(r_{max})^2 == max_mean_photon` make_traceless (boolean): removes the trace of the input matrix. tol (int): the number of decimal places to check the input matrix is symmetric Returns: tuple(array, array): Tuple containing the squeezing parameters of the input state to the interferometer, and the unitary matrix representing the interferometer """""" (m, n) = mat.shape if m != n: raise ValueError(""The Matrix is not square"") if np.round(np.linalg.norm(mat-np.transpose(mat)), tol) != 0: raise ValueError(""The input matrix is not symmetric"") if make_traceless: A = mat - np.trace(mat)*np.identity(n)/n s, U = takagi(A) sc = np.sqrt(1.0+1.0/max_mean_photon) vals = -np.arctanh(s/(s[0]*sc)) return vals, U ","def graph_embed(mat, max_mean_photon=1.0, make_traceless=True, tol=6): r"""""" Given an symmetric adjacency matrix (that can be complex in general), it returns the squeezing parameter and interferometer necessary for implementing it in GBS. Args: mat (array): square symmetric complex (or real or integer) array max_mean_photon (float): threshold value. It guarantees that the mode with the largest squeezing has ``max_mean_photon`` as the mean photon number i.e. 
:math:`sinh(r_{max})^2 == max_mean_photon` make_traceless (boolean): removes the trace of the input matrix. tol (int): the number of decimal places used to verify that the input matrix is symmetric Returns: tuple(array, array): Tuple containing the squeezing parameters of the input state to the interferometer, and the unitary matrix representing the interferometer """""" (m, n) = mat.shape if m != n: raise ValueError(""The Matrix is not square"") if np.round(np.linalg.norm(mat-np.transpose(mat)), tol) != 0: raise ValueError(""The input matrix is not symmetric"") if make_traceless: A = mat - np.trace(mat)*np.identity(n)/n s, U = takagi(A) sc = np.sqrt(1.0+1.0/max_mean_photon) vals = -np.arctanh(s/(s[0]*sc)) return vals, U " 57100,"def _get_memcache_key( namespace: str, sub_namespace: str | None, obj_id: str ) -> str: """"""Returns a memcache key for the class under the corresponding namespace and sub_namespace. Args: namespace: str. The namespace under which the values associated with the id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for ids that are not associated with a conceptual domain-layer entity and therefore don't require serialization. sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is the stringified version number of the objects. obj_id: str. The id of the value to store in the memory cache. Raises: Exception. The sub-namespace contains a ':'. Returns: str. The generated key for use in the memory cache in order to differentiate a passed-in key based on namespace and sub-namespace. """""" sub_namespace_key_string = (sub_namespace or '') if MEMCACHE_KEY_DELIMITER in sub_namespace_key_string: raise ValueError( 'Sub-namespace %s cannot contain \':\'.' % sub_namespace_key_string) return '%s%s%s%s%s' % ( namespace, MEMCACHE_KEY_DELIMITER, sub_namespace_key_string, MEMCACHE_KEY_DELIMITER, obj_id) ","def _get_memcache_key( namespace: str, sub_namespace: str | None, obj_id: str ) -> str: """"""Returns a memcache key for the class under the corresponding namespace and sub_namespace. Args: namespace: str. The namespace under which the values associated with the id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for ids that are not associated with a conceptual domain-layer entity and therefore don't require serialization. sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is the stringified version number of the objects. obj_id: str. The id of the value to store in the memory cache. Raises: Exception. The sub-namespace contains a ':'. Returns: str. The generated key for use in the memory cache in order to differentiate a passed-in key based on namespace and sub-namespace. """""" sub_namespace_key_string = (sub_namespace or '') if MEMCACHE_KEY_DELIMITER in sub_namespace_key_string: raise ValueError( 'Sub-namespace %s cannot contain \':\'.' % sub_namespace_key_string) return '%s%s%s%s%s' % ( namespace, MEMCACHE_KEY_DELIMITER, sub_namespace_key_string, MEMCACHE_KEY_DELIMITER, obj_id) " 31693,"def test_module(client: Client) -> str: """""" :type client: ``Client`` :param client: IOCParser client to use :return: 'ok' if test passed, anything else will fail the test. 
:rtype: ``str`` """""" response = client.ioc_from_url('https://pastebin.com/iMzrRXbJ') if (response.get('status') == 'fail') \ or (response.get('status') == 'error') \ or (response.get('status') is None): return 'Failed to connect with the API' return 'ok' ","def test_module(client: Client) -> str: """""" :type client: ``Client`` :param client: IOCParser client to use :return: 'ok' if test passed, anything else will fail the test. :rtype: ``str`` """""" response = client.ioc_from_url('https://pastebin.com/iMzrRXbJ') if (response.get('status') in ['fail', 'error', None]): return 'Failed to connect with the API' return 'ok' " 12811,"def delegate(parsed_arguments): if not parsed_arguments.delegatee: raise exceptions.Error( '--delegatee must be set to perform the delegation.') if parsed_arguments.delegatee in ('root', 'snapshot', 'timestamp', 'targets'): raise exceptions.Error( 'Cannot delegate to the top-level role: ' + repr(parsed_arguments.delegatee)) if not parsed_arguments.pubkeys: raise exceptions.Error( '--pubkeys must be set to perform the delegation.') public_keys = [] for public_key in parsed_arguments.pubkeys: imported_pubkey = import_publickey_from_file(public_key) public_keys.append(imported_pubkey) repository = repo_tool.load_repository( os.path.join(parsed_arguments.path, REPO_DIR)) if parsed_arguments.role == 'targets': repository.targets.delegate(parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Use key from --sign if present or use the default target_key for key in parsed_arguments.sign or [os.path.join(KEYSTORE_DIR, TARGETS_KEY_NAME)]: targets_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key), parsed_arguments.targets_pw ) repository.targets.load_signing_key(targets_private) # Generate the delegate targets file for key in parsed_arguments.pubkeys: delegate_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key[:-4]), parsed_arguments.pw, ) repository.targets(parsed_arguments.delegatee).load_signing_key(delegate_private) # A delegated (non-top-level-Targets) role. else: repository.targets(parsed_arguments.role).delegate( parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Update the required top-level roles, Snapshot and Timestamp, to make a new # release. Automatically making a new release can be disabled via # --no_release. if not parsed_arguments.no_release: snapshot_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw) timestamp_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) repository.snapshot.load_signing_key(snapshot_private) repository.timestamp.load_signing_key(timestamp_private) consistent_snapshot = roledb.get_roleinfo('root', repository._repository_name)['consistent_snapshot'] repository.writeall(consistent_snapshot=consistent_snapshot) # Move staged metadata directory to ""live"" metadata directory. 
write_to_live_repo(parsed_arguments) ","def delegate(parsed_arguments): if not parsed_arguments.delegatee: raise exceptions.Error( '--delegatee must be set to perform the delegation.') if parsed_arguments.delegatee in ('root', 'snapshot', 'timestamp', 'targets'): raise exceptions.Error( 'Cannot delegate to the top-level role: ' + repr(parsed_arguments.delegatee)) if not parsed_arguments.pubkeys: raise exceptions.Error( '--pubkeys must be set to perform the delegation.') public_keys = [] for public_key in parsed_arguments.pubkeys: imported_pubkey = import_publickey_from_file(public_key) public_keys.append(imported_pubkey) repository = repo_tool.load_repository( os.path.join(parsed_arguments.path, REPO_DIR)) if parsed_arguments.role == 'targets': repository.targets.delegate(parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Use key from --sign if present or use the default target_key for key in parsed_arguments.sign or [os.path.join(KEYSTORE_DIR, TARGETS_KEY_NAME)]: targets_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key), parsed_arguments.targets_pw ) repository.targets.load_signing_key(targets_private) # Generate the delegate targets file for key in parsed_arguments.pubkeys: delegatee_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key[:-4]), parsed_arguments.pw, ) repository.targets(parsed_arguments.delegatee).load_signing_key(delegate_private) # A delegated (non-top-level-Targets) role. else: repository.targets(parsed_arguments.role).delegate( parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Update the required top-level roles, Snapshot and Timestamp, to make a new # release. Automatically making a new release can be disabled via # --no_release. if not parsed_arguments.no_release: snapshot_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw) timestamp_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) repository.snapshot.load_signing_key(snapshot_private) repository.timestamp.load_signing_key(timestamp_private) consistent_snapshot = roledb.get_roleinfo('root', repository._repository_name)['consistent_snapshot'] repository.writeall(consistent_snapshot=consistent_snapshot) # Move staged metadata directory to ""live"" metadata directory. write_to_live_repo(parsed_arguments) " 2056,"def inplace_row_scale(X, scale): """""" Inplace row scaling of a CSR or CSC matrix. Scale each row of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : CSR or CSC sparse matrix of shape (n_samples, n_features) Matrix to be scaled. scale : ndarray of float of shape (n_features,) Array of precomputed sample-wise values to use for scaling. """""" if isinstance(X, sp.csc_matrix): inplace_csr_column_scale(X.T, scale) elif isinstance(X, sp.csr_matrix): inplace_csr_row_scale(X, scale) else: _raise_typeerror(X) ","def inplace_row_scale(X, scale): """""" Inplace row scaling of a CSR or CSC matrix. Scale each row of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. 
Parameters ---------- X : CSR or CSC sparse matrix of shape (n_samples, n_features) Matrix to be scaled. scale : ndarray of shape (n_features,), dtype=float Array of precomputed sample-wise values to use for scaling. """""" if isinstance(X, sp.csc_matrix): inplace_csr_column_scale(X.T, scale) elif isinstance(X, sp.csr_matrix): inplace_csr_row_scale(X, scale) else: _raise_typeerror(X) " 35737,"def make_segmentation_masks( image_sizes=((32, 32), (32, 42)), dtypes=(torch.long,), extra_dims=((), (4,)), ): for image_size, dtype, extra_dims_ in itertools.product(image_sizes, dtypes, extra_dims): yield make_segmentation_mask(size=image_size, dtype=dtype, extra_dims=extra_dims_) ","def make_segmentation_masks( image_sizes=((32, 32), (32, 42), (42, 32)), dtypes=(torch.long,), extra_dims=((), (4,)), ): for image_size, dtype, extra_dims_ in itertools.product(image_sizes, dtypes, extra_dims): yield make_segmentation_mask(size=image_size, dtype=dtype, extra_dims=extra_dims_) " 3945,"def read_log_stream_for_action_run( action_run_id: str, component: str, min_date: Optional[datetime.datetime], max_date: Optional[datetime.datetime], paasta_cluster: Optional[str], max_lines: Optional[int] = 1000, ) -> List[str]: if min_date is None: return [f""{action_run_id} has not started yet.""] if scribereader is None: return [""Scribereader (an internal Yelp package) is not available - unable to display logs.""] if get_scribereader_host_and_port() is None: return [ ""Unable to determine where Tron is located. If you're seeing this inside Yelp, report this to #compute-infra"" ] host, port = get_scribereader_host_and_port() # type: ignore # the None case is covered by the check above # this should never fail since get_scribereader_host_and_port() will have also called get_superregion() and we've ensured that # that file exists by getting to this point if paasta_cluster is None: paasta_cluster = get_superregion() today = datetime.date.today() start_date = min_date.date() end_date = max_date.date() if max_date else None use_tailer = today in {start_date, end_date} use_reader = start_date != today and end_date is not None if end_date is not None and end_date == today: end_date -= datetime.timedelta(days=1) namespace, job_name, run_num, action = action_run_id.split(""."") # in our logging infra, things are logged to per-instance streams - but # since Tron PaaSTA instances are of the form `job_name.action`, we need # to escape the period since some parts of our infra will reject streams # containing them - thus, the ""weird"" __ separator stream_name = f""stream_paasta_app_output_{namespace}_{job_name}__{action}"" output: List[Tuple[str, str]] = [] malformed_lines = 0 lines = 0 truncated_output = False # We'll only use a stream reader for logs from not-today. # that said, it's possible that an action spans more than a single day - in this case, we'll first read ""historical"" data from # the reader and then follow-up with today's logs from a stream tailer. 
# NOTE: this is more-or-less what our internal `scribereader` binary does if use_reader: with scribereader.get_stream_reader( stream_name=stream_name, min_date=min_date, max_date=max_date, reader_host=host, reader_port=port, ) as stream: for line in stream: if max_lines is not None and lines == max_lines: truncated_output = True break try: payload = json.loads(line) except json.decoder.JSONDecodeError: log.error(f""Unable to decode log line from stream ({stream_name}) for {action_run_id}: {line}"") malformed_lines += 1 continue if ( payload.get(""tron_run_number"") == int(run_num) and payload.get(""component"") == component and payload.get(""message"") is not None and payload.get(""timestamp"") is not None and payload.get(""cluster"") == paasta_cluster ): output.append((payload[""timestamp""], payload[""message""])) lines += 1 if use_tailer: stream = scribereader.get_stream_tailer( stream_name=stream_name, tailing_host=host, tailing_port=port, lines=-1, ) try: for line in stream: if lines == max_lines: truncated_output = True break try: payload = json.loads(line) except json.decoder.JSONDecodeError: log.error(f""Unable to decode log line from stream ({stream_name}) for {action_run_id}: {line}"") malformed_lines += 1 continue if ( payload.get(""tron_run_number"") == int(run_num) and payload.get(""component"") == component and payload.get(""message"") is not None and payload.get(""timestamp"") is not None and payload.get(""cluster"") == paasta_cluster ): output.append((payload[""timestamp""], payload[""message""])) lines += 1 except StreamTailerSetupError: return [ f""No data in stream {stream_name} - if this is the first time this action has run and you expected "" ""output, please wait a couple minutes and refresh."" ] except socket.timeout: return [ f""Unable to connect to stream {stream_name} - if this is the first time this action has run and you "" ""expected output, please wait a couple minutes and refresh."" ] finally: stream.close() if truncated_output: formated_today = datetime.datetime.now(datetime.timezone.utc).strftime(""%Y-%m-%dT%H:%M:%S.%fZ"") output.append( ( formated_today, f""This output is truncated. Use this command to view all lines 'scribereader -s {paasta_cluster} -f stream_paasta_app_output_{namespace}_{job_name}__{action}'"", ) ) # XXX: for some reason, we're occasionally getting data out of order from scribereader - so we'll sort based on # timestamp until we can figure out what's causing this. output.sort(key=operator.itemgetter(0)) return [line for _, line in output] + ( [f""{malformed_lines} encountered while retrieving logs""] if malformed_lines else [] ) ","def read_log_stream_for_action_run( action_run_id: str, component: str, min_date: Optional[datetime.datetime], max_date: Optional[datetime.datetime], paasta_cluster: Optional[str], max_lines: Optional[int] = 1000, ) -> List[str]: if min_date is None: return [f""{action_run_id} has not started yet.""] if scribereader is None: return [""Scribereader (an internal Yelp package) is not available - unable to display logs.""] if get_scribereader_host_and_port() is None: return [ ""Unable to determine where Tron is located. 
If you're seeing this inside Yelp, report this to #compute-infra"" ] host, port = get_scribereader_host_and_port() # type: ignore # the None case is covered by the check above # this should never fail since get_scribereader_host_and_port() will have also called get_superregion() and we've ensured that # that file exists by getting to this point if paasta_cluster is None: paasta_cluster = get_superregion() today = datetime.date.today() start_date = min_date.date() end_date = max_date.date() if max_date else None use_tailer = today in {start_date, end_date} use_reader = start_date != today and end_date is not None if end_date is not None and end_date == today: end_date -= datetime.timedelta(days=1) namespace, job_name, run_num, action = action_run_id.split(""."") # in our logging infra, things are logged to per-instance streams - but # since Tron PaaSTA instances are of the form `job_name.action`, we need # to escape the period since some parts of our infra will reject streams # containing them - thus, the ""weird"" __ separator stream_name = f""stream_paasta_app_output_{namespace}_{job_name}__{action}"" output: List[Tuple[str, str]] = [] malformed_lines = 0 lines = 0 truncated_output = False # We'll only use a stream reader for logs from not-today. # that said, it's possible that an action spans more than a single day - in this case, we'll first read ""historical"" data from # the reader and then follow-up with today's logs from a stream tailer. # NOTE: this is more-or-less what our internal `scribereader` binary does if use_reader: with scribereader.get_stream_reader( stream_name=stream_name, min_date=min_date, max_date=max_date, reader_host=host, reader_port=port, ) as stream: for line in stream: if max_lines is not None and lines == max_lines: truncated_output = True break try: payload = json.loads(line) except json.decoder.JSONDecodeError: log.error(f""Unable to decode log line from stream ({stream_name}) for {action_run_id}: {line}"") malformed_lines += 1 continue if ( payload.get(""tron_run_number"") == int(run_num) and payload.get(""component"") == component and payload.get(""message"") is not None and payload.get(""timestamp"") is not None and payload.get(""cluster"") == paasta_cluster ): output.append((payload[""timestamp""], payload[""message""])) lines += 1 if use_tailer: stream = scribereader.get_stream_tailer( stream_name=stream_name, tailing_host=host, tailing_port=port, lines=-1, ) try: for line in stream: if lines == max_lines: truncated_output = True break try: payload = json.loads(line) except json.decoder.JSONDecodeError: log.error(f""Unable to decode log line from stream ({stream_name}) for {action_run_id}: {line}"") malformed_lines += 1 continue if ( payload.get(""tron_run_number"") == int(run_num) and payload.get(""component"") == component and payload.get(""message"") is not None and payload.get(""timestamp"") is not None and payload.get(""cluster"") == paasta_cluster ): output.append((payload[""timestamp""], payload[""message""])) lines += 1 except StreamTailerSetupError: return [ f""No data in stream {stream_name} - if this is the first time this action has run and you expected "" ""output, please wait a couple minutes and refresh."" ] except socket.timeout: return [ f""Unable to connect to stream {stream_name} - if this is the first time this action has run and you "" ""expected output, please wait a couple minutes and refresh."" ] finally: stream.close() if truncated_output: formated_today = 
datetime.datetime.now(datetime.timezone.utc).strftime(""%Y-%m-%dT%H:%M:%S.%fZ"") output.append( ( formated_today, f""This output is truncated. Use this command to view all lines 'scribereader -s {paasta_cluster} -f {stream_name}'"", ) ) # XXX: for some reason, we're occasionally getting data out of order from scribereader - so we'll sort based on # timestamp until we can figure out what's causing this. output.sort(key=operator.itemgetter(0)) return [line for _, line in output] + ( [f""{malformed_lines} encountered while retrieving logs""] if malformed_lines else [] ) " 15542,"def period_or_cron(config): """"""Check that if cron pattern is used, then meter type and offsite must be removed."""""" if CONF_CRON_PATTERN in config and CONF_METER_TYPE in config: raise vol.Invalid( f""You can either use <{CONF_CRON_PATTERN}> or <{CONF_METER_TYPE}>"" ) if ( CONF_CRON_PATTERN in config and CONF_METER_OFFSET in config and config[CONF_METER_OFFSET] != DEFAULT_OFFSET ): raise vol.Invalid( f""When <{CONF_CRON_PATTERN}> is used <{CONF_METER_OFFSET}> has no meaning and must be removed"" ) return config ","def period_or_cron(config): """"""Check that if cron pattern is used, then meter type and offsite must be removed."""""" if CONF_CRON_PATTERN in config and CONF_METER_TYPE in config: raise vol.Invalid( f""Use <{CONF_CRON_PATTERN}> or <{CONF_METER_TYPE}>"" ) if ( CONF_CRON_PATTERN in config and CONF_METER_OFFSET in config and config[CONF_METER_OFFSET] != DEFAULT_OFFSET ): raise vol.Invalid( f""When <{CONF_CRON_PATTERN}> is used <{CONF_METER_OFFSET}> has no meaning and must be removed"" ) return config " 31129,"def download_and_extract_index(storage_bucket: Bucket, extract_destination_path: str): """"""Downloads and extracts indexe zip from cloud storage. Args: storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where build index.zip is stored. extract_destination_path (str): the full path of extract folder. Returns: str: extracted build index folder full path. Blob: google cloud storage object that represents prod index.zip blob. Blob: google cloud storage object that represents build index.zip blob. str: downloaded prod index generation. str: downloaded build index generation. 
"""""" if storage_bucket.name == GCPConfig.CI_BUILD_BUCKET: index_storage_path = os.path.join(GCPConfig.BUILD_BASE_PATH, f""{GCPConfig.INDEX_NAME}.zip"") else: index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f""{GCPConfig.INDEX_NAME}.zip"") download_index_path = os.path.join(extract_destination_path, f""{GCPConfig.INDEX_NAME}.zip"") index_blob = storage_bucket.blob(index_storage_path) index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME) if not os.path.exists(extract_destination_path): os.mkdir(extract_destination_path) if not index_blob.exists(): logging.error(f""No build index was found in path: {index_storage_path}"") sys.exit(1) index_blob.reload() index_generation = index_blob.generation index_blob.download_to_filename(download_index_path, if_generation_match=index_generation) if os.path.exists(download_index_path): with ZipFile(download_index_path, 'r') as index_zip: index_zip.extractall(extract_destination_path) if not os.path.exists(index_folder_path): logging.error(f""Failed creating build {GCPConfig.INDEX_NAME} folder with extracted data."") sys.exit(1) os.remove(download_index_path) logging.success(f""Finished downloading and extracting build {GCPConfig.INDEX_NAME} file to "" f""{extract_destination_path}"") return index_folder_path, index_blob, index_generation else: logging.error(f""Failed to download build {GCPConfig.INDEX_NAME}.zip file from cloud storage."") sys.exit(1) ","def download_and_extract_index(storage_bucket: Bucket, extract_destination_path: str): """"""Downloads and extracts index zip from cloud storage. Args: storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where build index.zip is stored. extract_destination_path (str): the full path of extract folder. Returns: str: extracted build index folder full path. Blob: google cloud storage object that represents prod index.zip blob. Blob: google cloud storage object that represents build index.zip blob. str: downloaded prod index generation. str: downloaded build index generation. 
"""""" if storage_bucket.name == GCPConfig.CI_BUILD_BUCKET: index_storage_path = os.path.join(GCPConfig.BUILD_BASE_PATH, f""{GCPConfig.INDEX_NAME}.zip"") else: index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f""{GCPConfig.INDEX_NAME}.zip"") download_index_path = os.path.join(extract_destination_path, f""{GCPConfig.INDEX_NAME}.zip"") index_blob = storage_bucket.blob(index_storage_path) index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME) if not os.path.exists(extract_destination_path): os.mkdir(extract_destination_path) if not index_blob.exists(): logging.error(f""No build index was found in path: {index_storage_path}"") sys.exit(1) index_blob.reload() index_generation = index_blob.generation index_blob.download_to_filename(download_index_path, if_generation_match=index_generation) if os.path.exists(download_index_path): with ZipFile(download_index_path, 'r') as index_zip: index_zip.extractall(extract_destination_path) if not os.path.exists(index_folder_path): logging.error(f""Failed creating build {GCPConfig.INDEX_NAME} folder with extracted data."") sys.exit(1) os.remove(download_index_path) logging.success(f""Finished downloading and extracting build {GCPConfig.INDEX_NAME} file to "" f""{extract_destination_path}"") return index_folder_path, index_blob, index_generation else: logging.error(f""Failed to download build {GCPConfig.INDEX_NAME}.zip file from cloud storage."") sys.exit(1) " 58660,"def change_sections_in_yaml_file(data: Dict[Text, Any], filename: Text) -> None: """"""Changes specific sections (given by keys) in a yaml file. Args: data: The keys are the sections to be changed, the values the contents of the respective sections. filename: Name of the file to be changed. """""" old_content = read_file(filename, DEFAULT_ENCODING) yaml_parser = _get_yaml_parser(typ=""rt"", add_version=False) old_content = _fix_ascii_encoding(old_content) new_content = yaml_parser.load(old_content) or {} for key in data.keys(): new_content[key] = data[key] yaml_parser.dump(new_content, Path(filename)) ","def change_sections_in_yaml_file(data: Dict[Text, Any], filename: Text) -> None: """"""Changes specific sections (given by keys) in a yaml file. Args: data: The keys are the sections to be changed, the values the contents of the respective sections. filename: Name of the file to be changed. 
"""""" old_content = read_file(filename, DEFAULT_ENCODING) yaml_parser = _get_yaml_parser(typ=""rt"", add_version=False) old_content = _fix_ascii_encoding(old_content) new_content = yaml_parser.load(old_content) or {} for key, value in data.items(): new_content[key] = value yaml_parser.dump(new_content, Path(filename)) " 22717,"def main(): print('Gather runtime data') try: subprocess.check_output(['choco', '--version']) except subprocess.CalledProcessError: raise RuntimeError('Error: Chocolatey (https://chocolatey.org/) needs' 'to be installed to run this script.') script_path = os.path.realpath(__file__) repo_path = os.path.dirname(os.path.dirname(script_path)) build_path = os.path.join(repo_path, 'windows-installer', 'build') venv_path = os.path.join(build_path, 'venv-config') venv_python = os.path.join(venv_path, 'Scripts', 'python.exe') installer_cfg_path = os.path.join(build_path, 'installer.cfg') wheels_path = os.path.join(build_path, 'wheels') certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'], universal_newlines=True, cwd=repo_path).strip() certbot_packages = ['acme', '.'] certbot_packages.extend([name for name in os.listdir(repo_path) if name.startswith('certbot-dns-')]) print('Copy assets') os.makedirs(build_path, exist_ok=True) shutil.copy(os.path.join(repo_path, 'windows-installer', 'certbot.ico'), build_path) shutil.copy(os.path.join(repo_path, 'windows-installer', 'run.py'), build_path) print('Prepare pynsist config') with open(os.path.join(installer_cfg_path), 'w') as file_h: file_h.write(""""""\ [Application] name=Certbot version={certbot_version} icon=certbot.ico publisher=Electronic Frontier Fundation script=run.py [Build] directory=nsis installer_name=certbot-{certbot_version}-win32_install.exe [Python] version=3.7.0 [Include] local_wheels=wheels\*.whl [Command certbot] entry_point=certbot.main:main """""".format(certbot_version=certbot_version)) print('Prepare build environment') subprocess.check_call([sys.executable, '-m', 'venv', '--clear', venv_path]) subprocess.check_call(['choco', 'upgrade', '-y', 'nsis']) subprocess.check_call([venv_python, '-m', 'pip', 'install', '--upgrade', 'pip']) shutil.rmtree(wheels_path, ignore_errors=True) os.makedirs(wheels_path, exist_ok=True) subprocess.check_call([venv_python, '-m', 'pip', 'install', 'wheel', 'pynsist']) print('Compile wheels') wheels_project = [os.path.join(repo_path, package) for package in certbot_packages] command = [venv_python, '-m', 'pip', 'wheel', '-w', wheels_path] command.extend(wheels_project) subprocess.check_call(command) print('Build the installer') subprocess.check_call([os.path.join(venv_path, 'Scripts', 'pynsist.exe'), installer_cfg_path]) print('Done') ","def main(): print('Gather runtime data') try: subprocess.check_output(['choco', '--version']) except subprocess.CalledProcessError: raise RuntimeError('Error: Chocolatey (https://chocolatey.org/) needs' 'to be installed to run this script.') script_path = os.path.realpath(__file__) repo_path = os.path.dirname(os.path.dirname(script_path)) build_path = os.path.join(repo_path, 'windows-installer', 'build') venv_path = os.path.join(build_path, 'venv-config') venv_python = os.path.join(venv_path, 'Scripts', 'python.exe') installer_cfg_path = os.path.join(build_path, 'installer.cfg') wheels_path = os.path.join(build_path, 'wheels') certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'], universal_newlines=True, cwd=repo_path).strip() 
certbot_packages = ['acme', '.'] certbot_packages.extend([name for name in os.listdir(repo_path) if name.startswith('certbot-dns-')]) print('Copy assets') os.makedirs(build_path, exist_ok=True) shutil.copy(os.path.join(repo_path, 'windows-installer', 'certbot.ico'), build_path) shutil.copy(os.path.join(repo_path, 'windows-installer', 'run.py'), build_path) print('Prepare pynsist config') with open(os.path.join(installer_cfg_path), 'w') as file_h: file_h.write(""""""\ [Application] name=Certbot version={certbot_version} icon=certbot.ico publisher=Electronic Frontier Fundation script=run.py [Build] directory=nsis installer_name=certbot-{certbot_version}-win32-installer.exe [Python] version=3.7.0 [Include] local_wheels=wheels\*.whl [Command certbot] entry_point=certbot.main:main """""".format(certbot_version=certbot_version)) print('Prepare build environment') subprocess.check_call([sys.executable, '-m', 'venv', '--clear', venv_path]) subprocess.check_call(['choco', 'upgrade', '-y', 'nsis']) subprocess.check_call([venv_python, '-m', 'pip', 'install', '--upgrade', 'pip']) shutil.rmtree(wheels_path, ignore_errors=True) os.makedirs(wheels_path, exist_ok=True) subprocess.check_call([venv_python, '-m', 'pip', 'install', 'wheel', 'pynsist']) print('Compile wheels') wheels_project = [os.path.join(repo_path, package) for package in certbot_packages] command = [venv_python, '-m', 'pip', 'wheel', '-w', wheels_path] command.extend(wheels_project) subprocess.check_call(command) print('Build the installer') subprocess.check_call([os.path.join(venv_path, 'Scripts', 'pynsist.exe'), installer_cfg_path]) print('Done') " 26414,"def get_policy_permissions(region: str) -> AllowedActionCollection: """""" Returns an action collection containing lists of all permission grant patterns keyed by resource that they are allowed upon. Requires AWS credentials to be associated with a user or assumed role. 
:param zone: AWS zone to connect to """""" iam: IAMClient = cast(IAMClient, get_client('iam', region)) sts: STSClient = cast(STSClient, get_client('sts', region)) #TODO Condider effect: deny at some point allowed_actions: AllowedActionCollection = defaultdict(lambda: {'Action': [], 'NotAction': []}) try: # If successful then we assume we are operating as a user, and grab the associated permissions user = iam.get_user() list_policies = iam.list_user_policies(UserName=user['User']['UserName']) attached_policies = iam.list_attached_user_policies(UserName=user['User']['UserName']) user_attached_policies = allowed_actions_attached(iam, attached_policies['AttachedPolicies']) allowed_actions = add_to_action_collection(allowed_actions, user_attached_policies) user_inline_policies = allowed_actions_users(iam, list_policies['PolicyNames'], user['User']['UserName']) allowed_actions = add_to_action_collection(allowed_actions, user_inline_policies) except: # If not successful, we check the role associated with an instance profile # and grab the role's associated permissions role = sts.get_caller_identity() # Splits a role arn of format 'arn:aws:sts::123456789012:assumed-role/my-role-name/my-role-session-name' # on ""/"" and takes the second element to get the role name to list policies try: role_name = role[""Arn""].split(""/"")[1] list_policies = iam.list_role_policies(RoleName=role_name) attached_policies = iam.list_attached_role_policies(RoleName=role_name) #logger.info(attached_policies) logger.debug(""Checking attached role policies"") role_attached_policies = allowed_actions_attached(iam, attached_policies['AttachedPolicies']) allowed_actions = add_to_action_collection(allowed_actions, role_attached_policies) logger.debug(""Checking inline role policies"") role_inline_policies = allowed_actions_roles(iam, list_policies['PolicyNames'], role_name) allowed_actions = add_to_action_collection(allowed_actions, role_inline_policies) except: logger.exception(""Exception when trying to get role policies"") logger.debug(""ALLOWED ACTIONS"") logger.debug(allowed_actions) return allowed_actions ","def get_policy_permissions(region: str) -> AllowedActionCollection: """""" Returns an action collection containing lists of all permission grant patterns keyed by resource that they are allowed upon. Requires AWS credentials to be associated with a user or assumed role. 
:param zone: AWS zone to connect to """""" iam: IAMClient = cast(IAMClient, get_client('iam', region)) sts: STSClient = cast(STSClient, get_client('sts', region)) #TODO Condider effect: deny at some point allowed_actions: AllowedActionCollection = defaultdict(lambda: {'Action': [], 'NotAction': []}) try: # If successful then we assume we are operating as a user, and grab the associated permissions user = iam.get_user() list_policies = iam.list_user_policies(UserName=user['User']['UserName']) attached_policies = iam.list_attached_user_policies(UserName=user['User']['UserName']) user_attached_policies = allowed_actions_attached(iam, attached_policies['AttachedPolicies']) allowed_actions = add_to_action_collection(allowed_actions, user_attached_policies) user_inline_policies = allowed_actions_users(iam, list_policies['PolicyNames'], user['User']['UserName']) allowed_actions = add_to_action_collection(allowed_actions, user_inline_policies) except: # If not successful, we check the role associated with an instance profile # and grab the role's associated permissions role = sts.get_caller_identity() # Splits a role arn of format 'arn:aws:sts::123456789012:assumed-role/my-role-name/my-role-session-name' # on ""/"" and takes the second element to get the role name to list policies try: role_name = role[""Arn""].split(""/"")[1] list_policies = iam.list_role_policies(RoleName=role_name) attached_policies = iam.list_attached_role_policies(RoleName=role_name) #logger.info(attached_policies) logger.debug(""Checking attached role policies"") role_attached_policies = allowed_actions_attached(iam, attached_policies['AttachedPolicies']) allowed_actions = add_to_action_collection(allowed_actions, role_attached_policies) logger.debug(""Checking inline role policies"") role_inline_policies = allowed_actions_roles(iam, list_policies['PolicyNames'], role_name) allowed_actions = add_to_action_collection(allowed_actions, role_inline_policies) except: logger.exception(""Exception when trying to get role policies"") logger.debug(""Allowed actions: %s"", allowed_actions) return allowed_actions " 43888,"def two_qubit_decomposition(U, wires): r""""""Recover the decomposition of a two-qubit matrix :math:`U` in terms of elementary operations. The work of `Shende, Markov, and Bullock (2003) `__ presents a fixed-form decomposition of :math:`U` in terms of single-qubit gates and CNOTs. Multiple such decompositions are possible (by choosing two of {``RX``, ``RY``, ``RZ``}). Here we choose the ``RY``, ``RZ`` case (fig. 2 in the above) to match with the default decomposition of the single-qubit ``Rot`` operations as ``RZ RY RZ``. The form of the decomposition is: .. figure:: ../../_static/two_qubit_decomposition.svg :align: center :width: 100% :target: javascript:void(0); where :math:`A, B, C, D` are :math:`SU(2)` gates. Args: U (tensor): A 4 x 4 unitary matrix. wires (Union[Wires, Sequence[int] or int]): The wires on which to apply the operation. Returns: list[qml.Operation]: A list of operations that represent the decomposition of the matrix U. """""" # First, test if we have a tensor product of two single-qubit operations. If # so, we don't actually need to do a decomposition. To test this, we can # check if Edag U E is in SO(4) because of the isomorphism between SO(4) and # SU(2) x SU(2). 
test_so4 = qml.math.linalg.multi_dot([Edag, U, E]) if qml.math.isclose(qml.math.linalg.det(test_so4), 1.0) and qml.math.allclose( qml.math.dot(test_so4, qml.math.T(test_so4)), qml.math.eye(4) ): A, B = _su2su2_to_tensor_products(U) A_ops = zyz_decomposition(A, wires[0]) B_ops = zyz_decomposition(B, wires[1]) return A_ops + B_ops # The final form of this decomposition is U = (A \otimes B) V (C \otimes D), # as expressed in the circuit below. # -U- = -C--X--RZ(d)--C---------X--A-| # -U- = -D--C--RY(b)--X--RY(a)--C--B-| # First, we note that this method works only for SU(4) gates, meaning that # we need to rescale the matrix by its determinant. Furthermore, we add a # SWAP as per v1 of 0308033, which helps with some rearranging of gates in # the decomposition (it will cancel out the fact that we need to add a SWAP # to fix the determinant in another part later). swap_U = qml.math.exp(1j * np.pi / 4) * qml.math.dot(SWAP, _convert_to_su4(U)) # Next, we can choose the angles of the RZ / RY rotations. See the docstring # within the function used below. This is to ensure U and V somehow maintain # a relationship between their spectra to ensure we can recover A, B, C, D. alpha, beta, delta = _select_rotation_angles(swap_U) # This is the interior portion of the decomposition circuit interior_decomp = [ qml.CNOT(wires=[wires[1], wires[0]]), qml.RZ(delta, wires=wires[0]), qml.RY(beta, wires=wires[1]), qml.CNOT(wires=[wires[0], wires[1]]), qml.RY(alpha, wires=wires[1]), qml.CNOT(wires=[wires[1], wires[0]]), ] # We need the matrix representation of this interior part, V, in order to # decompose U = (A \otimes B) V (C \otimes D) # # Looking at the decomposition above, V has determinant -1 (because there # are 3 CNOTs, each with determinant -1). The relationship between U and V # requires that both are in SU(4), so we add a SWAP after to V. We will see # how this gets fixed later. # # -V- = -X--RZ(d)--C---------X--SWAP-| # -V- = -C--RY(b)--X--RY(a)--C--SWAP-| RZd = qml.RZ(delta, wires=0).matrix RYb = qml.RY(beta, wires=0).matrix RYa = qml.RY(alpha, wires=0).matrix V = qml.math.linalg.multi_dot( [ SWAP, CNOT10, qml.math.kron(qml.math.eye(2), RYa), CNOT01, qml.math.kron(RZd, RYb), CNOT10, ] ) # Now we need to find the four SU(2) operations A, B, C, D A, B, C, D = _extract_su2su2_prefactors(swap_U, V) # At this point, we have the following: # -U-SWAP- = --C--X-RZ(d)-C-------X-SWAP--A| # -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C-SWAP--B| # # Using the relationship that SWAP(A \otimes B) SWAP = B \otimes A, # -U-SWAP- = --C--X-RZ(d)-C-------X--B--SWAP-| # -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C--A--SWAP-| # # Now the SWAPs cancel, giving us the desired decomposition # (up to a global phase). # -U- = --C--X-RZ(d)-C-------X--B--| # -U- = --D--C-RZ(b)-X-RY(a)-C--A--| A_ops = zyz_decomposition(A, wires[1]) B_ops = zyz_decomposition(B, wires[0]) C_ops = zyz_decomposition(C, wires[0]) D_ops = zyz_decomposition(D, wires[1]) # Return the full decomposition return C_ops + D_ops + interior_decomp + A_ops + B_ops ","def two_qubit_decomposition(U, wires): r""""""Recover the decomposition of a two-qubit matrix :math:`U` in terms of elementary operations. The work of `Shende, Markov, and Bullock (2003) `__ presents a fixed-form decomposition of :math:`U` in terms of single-qubit gates and CNOTs. Multiple such decompositions are possible (by choosing two of ``{RX, RY, RZ}``). Here we choose the ``RY``, ``RZ`` case (fig. 2 in the above) to match with the default decomposition of the single-qubit ``Rot`` operations as ``RZ RY RZ``. 
The form of the decomposition is: .. figure:: ../../_static/two_qubit_decomposition.svg :align: center :width: 100% :target: javascript:void(0); where :math:`A, B, C, D` are :math:`SU(2)` gates. Args: U (tensor): A 4 x 4 unitary matrix. wires (Union[Wires, Sequence[int] or int]): The wires on which to apply the operation. Returns: list[qml.Operation]: A list of operations that represent the decomposition of the matrix U. """""" # First, test if we have a tensor product of two single-qubit operations. If # so, we don't actually need to do a decomposition. To test this, we can # check if Edag U E is in SO(4) because of the isomorphism between SO(4) and # SU(2) x SU(2). test_so4 = qml.math.linalg.multi_dot([Edag, U, E]) if qml.math.isclose(qml.math.linalg.det(test_so4), 1.0) and qml.math.allclose( qml.math.dot(test_so4, qml.math.T(test_so4)), qml.math.eye(4) ): A, B = _su2su2_to_tensor_products(U) A_ops = zyz_decomposition(A, wires[0]) B_ops = zyz_decomposition(B, wires[1]) return A_ops + B_ops # The final form of this decomposition is U = (A \otimes B) V (C \otimes D), # as expressed in the circuit below. # -U- = -C--X--RZ(d)--C---------X--A-| # -U- = -D--C--RY(b)--X--RY(a)--C--B-| # First, we note that this method works only for SU(4) gates, meaning that # we need to rescale the matrix by its determinant. Furthermore, we add a # SWAP as per v1 of 0308033, which helps with some rearranging of gates in # the decomposition (it will cancel out the fact that we need to add a SWAP # to fix the determinant in another part later). swap_U = qml.math.exp(1j * np.pi / 4) * qml.math.dot(SWAP, _convert_to_su4(U)) # Next, we can choose the angles of the RZ / RY rotations. See the docstring # within the function used below. This is to ensure U and V somehow maintain # a relationship between their spectra to ensure we can recover A, B, C, D. alpha, beta, delta = _select_rotation_angles(swap_U) # This is the interior portion of the decomposition circuit interior_decomp = [ qml.CNOT(wires=[wires[1], wires[0]]), qml.RZ(delta, wires=wires[0]), qml.RY(beta, wires=wires[1]), qml.CNOT(wires=[wires[0], wires[1]]), qml.RY(alpha, wires=wires[1]), qml.CNOT(wires=[wires[1], wires[0]]), ] # We need the matrix representation of this interior part, V, in order to # decompose U = (A \otimes B) V (C \otimes D) # # Looking at the decomposition above, V has determinant -1 (because there # are 3 CNOTs, each with determinant -1). The relationship between U and V # requires that both are in SU(4), so we add a SWAP after to V. We will see # how this gets fixed later. # # -V- = -X--RZ(d)--C---------X--SWAP-| # -V- = -C--RY(b)--X--RY(a)--C--SWAP-| RZd = qml.RZ(delta, wires=0).matrix RYb = qml.RY(beta, wires=0).matrix RYa = qml.RY(alpha, wires=0).matrix V = qml.math.linalg.multi_dot( [ SWAP, CNOT10, qml.math.kron(qml.math.eye(2), RYa), CNOT01, qml.math.kron(RZd, RYb), CNOT10, ] ) # Now we need to find the four SU(2) operations A, B, C, D A, B, C, D = _extract_su2su2_prefactors(swap_U, V) # At this point, we have the following: # -U-SWAP- = --C--X-RZ(d)-C-------X-SWAP--A| # -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C-SWAP--B| # # Using the relationship that SWAP(A \otimes B) SWAP = B \otimes A, # -U-SWAP- = --C--X-RZ(d)-C-------X--B--SWAP-| # -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C--A--SWAP-| # # Now the SWAPs cancel, giving us the desired decomposition # (up to a global phase). 
# -U- = --C--X-RZ(d)-C-------X--B--| # -U- = --D--C-RZ(b)-X-RY(a)-C--A--| A_ops = zyz_decomposition(A, wires[1]) B_ops = zyz_decomposition(B, wires[0]) C_ops = zyz_decomposition(C, wires[0]) D_ops = zyz_decomposition(D, wires[1]) # Return the full decomposition return C_ops + D_ops + interior_decomp + A_ops + B_ops " 19992,"def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'): """"""Automatically detects a color card and output info to use in create_color_card_mask function Inputs: img = Input RGB image data containing a color card. threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss) thresh_value = Thresholding value, optional (default 125) blurry = Bool (default False) if True then image sharpening applied background = Type of image background either 'dark' or 'light (default 'dark'); if 'light' then histogram expansion applied to better detect edges, but histogram expansion will be hindered if there is a dark background Returns: df = Dataframe containing information about the filtered contours start_coord = Two element tuple of starting coordinates, location of the top left pixel detected spacing = Two element tuple of spacing between centers of chips :param img: numpy.ndarray :param threshold: str :param threshvalue: int :param blurry: bool :param background: str :return df: pandas.core.frame.DataFrame :return start_coord: tuple :return spacing: tuple """""" # Imports import skimage import pandas as pd from scipy.spatial.distance import squareform, pdist # Get image attributes height, width, channels = img.shape totalpx = float(height * width) # Minimum and maximum square size based upon 12 MP image minarea = 1000. / 12000000. * totalpx maxarea = 8000000. / 12000000. * totalpx # Create gray image for further processing gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Laplacian Fourier Transform detection of blurriness blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var() # If image is blurry then try to deblur using kernel if blurry: # from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening kernel = np.array([[-1, -1, -1, -1, -1], [-1, 2, 2, 2, -1], [-1, 2, 8, 2, -1], [-1, 2, 2, 2, -1], [-1, -1, -1, -1, -1]]) / 8.0 # Store result back out for further processing gray_img = cv2.filter2D(gray_img, -1, kernel) # In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu # thresholding. 
If your image has a bright background then apply if background == 'light': clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4)) # apply CLAHE histogram expansion to find squares better with canny edge detection gray_img = clahe.apply(gray_img) elif background != 'dark': fatal_error('Background parameter ' + str(background) + ' is not ""light"" or ""dark""!') # Thresholding if threshold == ""otsu"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) elif threshold == ""normal"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY) elif threshold == ""adaptgauss"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0) threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 51, 2) else: fatal_error('Threshold ' + str(threshold) + ' is not ""otsu"", ""normal"", or ""adaptgauss""!') # Apply automatic Canny edge detection using the computed median edges = skimage.feature.canny(threshold) edges.dtype = 'uint8' # Compute contours to find the squares of the card _, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Variable of which contour is which mindex = [] # Variable to store moments mu = [] # Variable to x,y coordinates in tuples mc = [] # Variable to x coordinate as integer mx = [] # Variable to y coordinate as integer my = [] # Variable to store area marea = [] # Variable to store whether something is a square (1) or not (0) msquare = [] # Variable to store square approximation coordinates msquarecoords = [] # Variable to store child hierarchy element mchild = [] # Fitted rectangle height mheight = [] # Fitted rectangle width mwidth = [] # Ratio of height/width mwhratio = [] # Extract moments from contour image for x in range(0, len(contours)): mu.append(cv2.moments(contours[x])) marea.append(cv2.contourArea(contours[x])) mchild.append(int(hierarchy[0][x][2])) mindex.append(x) # Cycle through moment data and compute location for each moment for m in mu: if m['m00'] != 0: # This is the area term for a moment mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00'])) mx.append(int(m['m10'] / m['m00'])) my.append(int(m['m01'] / m['m00'])) else: mc.append((0, 0)) mx.append((0)) my.append((0)) # Loop over our contours and extract data about them for index, c in enumerate(contours): # Area isn't 0, but greater than min-area and less than max-area if marea[index] != 0 and minarea < marea[index] < maxarea: peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.15 * peri, True) center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle mwidth.append(wh[0]) mheight.append(wh[1]) mwhratio.append(wh[0] / wh[1]) msquare.append(len(approx)) # If the approx contour has 4 points then we can assume we have 4-sided objects if len(approx) == 4 or 5: msquarecoords.append(approx) else: # It's not square msquare.append(0) msquarecoords.append(0) else: # Contour has area of 0, not interesting msquare.append(0) msquarecoords.append(0) mwidth.append(0) mheight.append(0) mwhratio.append(0) # Make a pandas df from data for filtering out junk locarea = {'index': mindex, 
'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio, 'Area': marea, 'square': msquare, 'child': mchild} df = pd.DataFrame(locarea) # Add calculated blur factor to output df['blurriness'] = blurfactor # Filter df for attributes that would isolate squares of reasonable size df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) & (df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)] # Filter nested squares from dataframe, was having issues with median being towards smaller nested squares df = df[~(df['index'].isin(df['index'] + 1))] # Count up squares that are within a given radius, more squares = more likelihood of them being the card # Median width of square time 2.5 gives proximity radius for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 6 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Compute how similar in area the squares are. lots of similar values indicates card # isolate area measurements filtered_area = df['Area'] # Create empty matrix for storing comparisons sizecomp = np.zeros((len(filtered_area), len(filtered_area))) # Double loop through all areas to compare to each other for p in range(0, len(filtered_area)): for o in range(0, len(filtered_area)): big = max(filtered_area.iloc[p], filtered_area.iloc[o]) small = min(filtered_area.iloc[p], filtered_area.iloc[o]) pct = 100. 
* (small / big) sizecomp[p][o] = pct # How many comparisons given 90% square similarity sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1) # Append sizeprox summary to dataframe df = df.assign(sizeprox=sizematrix.values) # Reorder dataframe for better printing df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child', 'blurriness', 'distprox', 'sizeprox']] # Loosely filter for size and distance (relative size to median) minsqwidth = median_sq_width_px * 0.80 maxsqwidth = median_sq_width_px * 1.2 df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) & (df['width'] < maxsqwidth)] # Filter for proximity again to root out stragglers # Find and count up squares that are within given radius, # more squares = more likelihood of them being the card # Median width of square time 2.5 gives proximity radius for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 5 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Filter results for distance proximity to other squares df = df[(df['distprox'] >= 4)] # Extract the starting coordinate start_coord = (int(df['X'].min()), int(df['Y'].min())) # Calculate the range spacingx_short = (df['X'].max() - df['X'].min()) / 3 spacingy_short = (df['Y'].max() - df['Y'].min()) / 3 spacingx_long = (df['X'].max() - df['X'].min()) / 5 spacingy_long = (df['Y'].max() - df['Y'].min()) / 5 # Chip spacing since 4x6 card assumed spacing_short = min(spacingx_short, spacingy_short) spacing_long = max(spacingx_long, spacingy_long) # Smaller spacing measurement might have a chip missing spacing = int(max(spacing_short, spacing_long)) spacing = (spacing, spacing) return df, start_coord, spacing ","def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'): """"""Automatically detects a color card and output info to use in create_color_card_mask function Inputs: img = Input RGB image data containing a color card. 
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss) threshvalue = Thresholding value, optional (default 125) blurry = Bool (default False) if True then image sharpening applied background = Type of image background either 'dark' or 'light (default 'dark'); if 'light' then histogram expansion applied to better detect edges, but histogram expansion will be hindered if there is a dark background Returns: df = Dataframe containing information about the filtered contours start_coord = Two element tuple of starting coordinates, location of the top left pixel detected spacing = Two element tuple of spacing between centers of chips :param img: numpy.ndarray :param threshold: str :param threshvalue: int :param blurry: bool :param background: str :return df: pandas.core.frame.DataFrame :return start_coord: tuple :return spacing: tuple """""" # Imports import skimage import pandas as pd from scipy.spatial.distance import squareform, pdist # Get image attributes height, width, channels = img.shape totalpx = float(height * width) # Minimum and maximum square size based upon 12 MP image minarea = 1000. / 12000000. * totalpx maxarea = 8000000. / 12000000. * totalpx # Create gray image for further processing gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Laplacian Fourier Transform detection of blurriness blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var() # If image is blurry then try to deblur using kernel if blurry: # from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening kernel = np.array([[-1, -1, -1, -1, -1], [-1, 2, 2, 2, -1], [-1, 2, 8, 2, -1], [-1, 2, 2, 2, -1], [-1, -1, -1, -1, -1]]) / 8.0 # Store result back out for further processing gray_img = cv2.filter2D(gray_img, -1, kernel) # In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu # thresholding. 
If your image has a bright background then apply if background == 'light': clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4)) # apply CLAHE histogram expansion to find squares better with canny edge detection gray_img = clahe.apply(gray_img) elif background != 'dark': fatal_error('Background parameter ' + str(background) + ' is not ""light"" or ""dark""!') # Thresholding if threshold == ""otsu"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) elif threshold == ""normal"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY) elif threshold == ""adaptgauss"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0) threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 51, 2) else: fatal_error('Threshold ' + str(threshold) + ' is not ""otsu"", ""normal"", or ""adaptgauss""!') # Apply automatic Canny edge detection using the computed median edges = skimage.feature.canny(threshold) edges.dtype = 'uint8' # Compute contours to find the squares of the card _, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Variable of which contour is which mindex = [] # Variable to store moments mu = [] # Variable to x,y coordinates in tuples mc = [] # Variable to x coordinate as integer mx = [] # Variable to y coordinate as integer my = [] # Variable to store area marea = [] # Variable to store whether something is a square (1) or not (0) msquare = [] # Variable to store square approximation coordinates msquarecoords = [] # Variable to store child hierarchy element mchild = [] # Fitted rectangle height mheight = [] # Fitted rectangle width mwidth = [] # Ratio of height/width mwhratio = [] # Extract moments from contour image for x in range(0, len(contours)): mu.append(cv2.moments(contours[x])) marea.append(cv2.contourArea(contours[x])) mchild.append(int(hierarchy[0][x][2])) mindex.append(x) # Cycle through moment data and compute location for each moment for m in mu: if m['m00'] != 0: # This is the area term for a moment mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00'])) mx.append(int(m['m10'] / m['m00'])) my.append(int(m['m01'] / m['m00'])) else: mc.append((0, 0)) mx.append((0)) my.append((0)) # Loop over our contours and extract data about them for index, c in enumerate(contours): # Area isn't 0, but greater than min-area and less than max-area if marea[index] != 0 and minarea < marea[index] < maxarea: peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.15 * peri, True) center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle mwidth.append(wh[0]) mheight.append(wh[1]) mwhratio.append(wh[0] / wh[1]) msquare.append(len(approx)) # If the approx contour has 4 points then we can assume we have 4-sided objects if len(approx) == 4 or 5: msquarecoords.append(approx) else: # It's not square msquare.append(0) msquarecoords.append(0) else: # Contour has area of 0, not interesting msquare.append(0) msquarecoords.append(0) mwidth.append(0) mheight.append(0) mwhratio.append(0) # Make a pandas df from data for filtering out junk locarea = {'index': mindex, 
'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio, 'Area': marea, 'square': msquare, 'child': mchild} df = pd.DataFrame(locarea) # Add calculated blur factor to output df['blurriness'] = blurfactor # Filter df for attributes that would isolate squares of reasonable size df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) & (df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)] # Filter nested squares from dataframe, was having issues with median being towards smaller nested squares df = df[~(df['index'].isin(df['index'] + 1))] # Count up squares that are within a given radius, more squares = more likelihood of them being the card # Median width of square time 2.5 gives proximity radius for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 6 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Compute how similar in area the squares are. lots of similar values indicates card # isolate area measurements filtered_area = df['Area'] # Create empty matrix for storing comparisons sizecomp = np.zeros((len(filtered_area), len(filtered_area))) # Double loop through all areas to compare to each other for p in range(0, len(filtered_area)): for o in range(0, len(filtered_area)): big = max(filtered_area.iloc[p], filtered_area.iloc[o]) small = min(filtered_area.iloc[p], filtered_area.iloc[o]) pct = 100. 
* (small / big) sizecomp[p][o] = pct # How many comparisons given 90% square similarity sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1) # Append sizeprox summary to dataframe df = df.assign(sizeprox=sizematrix.values) # Reorder dataframe for better printing df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child', 'blurriness', 'distprox', 'sizeprox']] # Loosely filter for size and distance (relative size to median) minsqwidth = median_sq_width_px * 0.80 maxsqwidth = median_sq_width_px * 1.2 df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) & (df['width'] < maxsqwidth)] # Filter for proximity again to root out stragglers # Find and count up squares that are within given radius, # more squares = more likelihood of them being the card # Median width of square time 2.5 gives proximity radius for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 5 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Filter results for distance proximity to other squares df = df[(df['distprox'] >= 4)] # Extract the starting coordinate start_coord = (int(df['X'].min()), int(df['Y'].min())) # Calculate the range spacingx_short = (df['X'].max() - df['X'].min()) / 3 spacingy_short = (df['Y'].max() - df['Y'].min()) / 3 spacingx_long = (df['X'].max() - df['X'].min()) / 5 spacingy_long = (df['Y'].max() - df['Y'].min()) / 5 # Chip spacing since 4x6 card assumed spacing_short = min(spacingx_short, spacingy_short) spacing_long = max(spacingx_long, spacingy_long) # Smaller spacing measurement might have a chip missing spacing = int(max(spacing_short, spacing_long)) spacing = (spacing, spacing) return df, start_coord, spacing " 7619,"def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]: """""" Decorator to make wrapper function that parses cosmology-like inputs. Parameters ---------- pyfunc : Python function object An arbitrary Python function. Returns ------- callable[..., Any] Wrapped `pyfunc`, as described above. Notes ----- All decorated functions should add the following to 'Parameters'. format : bool or None or str or tuple thereof, optional keyword-only Whether to allow the arguments to be converted to a |Cosmology|. This allows, e.g. a |Table| to be given instead a Cosmology. `False` (default) will not allow conversion. `True` or `None` will, and will use the auto-identification to try to infer the correct format. A `str` is assumed to be the correct format to use when converting. `format` is broadcast to match the shape of the cosmology arguments. Note that the cosmology arguments are not broadcast against ``format``, so it cannot determine the output shape. """""" sig = inspect.signature(pyfunc) nin = len([p.kind == 0 for p in sig.parameters.values()]) # Make wrapper function that parses cosmology-like inputs @functools.wraps(pyfunc) def wrapper(*cosmos: Any, format: _FormatsT = False, **kwargs: Any) -> bool: if len(cosmos) > nin: raise TypeError # Parse cosmologies to format. Only do specified number. 
cosmos = _parse_formats(*cosmos, format=format) # Evaluate pyfunc, erroring if didn't match specified number. result = wrapper.__wrapped__(*cosmos, **kwargs) # Return, casting to correct type casting is possible. return result return wrapper ","def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]: """""" Decorator to make wrapper function that parses cosmology-like inputs. Parameters ---------- pyfunc : Python function object An arbitrary Python function. Returns ------- callable[..., Any] Wrapped `pyfunc`, as described above. Notes ----- All decorated functions should add the following to 'Parameters'. format : bool or None or str or tuple thereof, optional keyword-only Whether to allow the arguments to be converted to a |Cosmology|. This allows, e.g. a |Table| to be given instead a Cosmology. `False` (default) will not allow conversion. `True` or `None` will, and will use the auto-identification to try to infer the correct format. A `str` is assumed to be the correct format to use when converting. `format` is broadcast to match the shape of the cosmology arguments. Note that the cosmology arguments are not broadcast against ``format``, so it cannot determine the output shape. """""" nin = len(inspect.signature(pyfunc).parameters) @functools.wraps(pyfunc) def wrapper(*cosmos: Any, format: _FormatsT = False, **kwargs: Any) -> bool: if len(cosmos) > nin: raise TypeError # Parse cosmologies to format. Only do specified number. cosmos = _parse_formats(*cosmos, format=format) # Evaluate pyfunc, erroring if didn't match specified number. result = wrapper.__wrapped__(*cosmos, **kwargs) # Return, casting to correct type casting is possible. return result return wrapper " 39278,"def numpy_to_idarr(ind, deep=False, return_ind=False): """"""Safely convert a numpy array to a vtkIdTypeArray"""""" try: ind = np.array(ind) except: raise TypeError('Indices must be either a mask, array, list, or iterable') if ind.dtype == np.bool: ind = ind.nonzero()[0] if not isinstance(ind, np.ndarray): ind = np.asarray(ind, pyvista.ID_TYPE) elif ind.dtype != pyvista.ID_TYPE: ind = ind.astype(pyvista.ID_TYPE) elif not ind.flags['C_CONTIGUOUS']: ind = np.ascontiguousarray(ind, dtype=pyvista.ID_TYPE) # must ravel or segfault when saving MultiBlock vtk_idarr = numpy_to_vtkIdTypeArray(ind.ravel(), deep=deep) if return_ind: return vtk_idarr, ind return vtk_idarr ","def numpy_to_idarr(ind, deep=False, return_ind=False): """"""Safely convert a numpy array to a vtkIdTypeArray"""""" try: ind = np.asarray(ind) except: raise TypeError('Indices must be either a mask, array, list, or iterable') if ind.dtype == np.bool: ind = ind.nonzero()[0] if not isinstance(ind, np.ndarray): ind = np.asarray(ind, pyvista.ID_TYPE) elif ind.dtype != pyvista.ID_TYPE: ind = ind.astype(pyvista.ID_TYPE) elif not ind.flags['C_CONTIGUOUS']: ind = np.ascontiguousarray(ind, dtype=pyvista.ID_TYPE) # must ravel or segfault when saving MultiBlock vtk_idarr = numpy_to_vtkIdTypeArray(ind.ravel(), deep=deep) if return_ind: return vtk_idarr, ind return vtk_idarr " 10828,"def _legalize(module, dmm, fndesc): """""" Legalize the code in the module. Returns True if the module is legal for the rewrite pass that remove unnecessary refcount. """""" def valid_output(ty): """""" Valid output are any type that does not need refcount """""" model = dmm[ty] return not model.contains_nrt_meminfo() def valid_input(ty): """""" Valid input are any type that does not need refcount except Array. 
"""""" return valid_output(ty) or isinstance(ty, types.Array) # Ensure no reference to function marked as # ""numba_args_may_always_need_nrt"" try: nmd = module.get_named_metadata(""numba_args_may_always_need_nrt"") except KeyError: # Nothing marked pass else: # Has functions marked as ""numba_args_may_always_need_nrt"" if len(nmd.operands) > 0: # The pass is illegal for this compilation unit. return False # More legalization base on function type argtypes = fndesc.argtypes restype = fndesc.restype calltypes = fndesc.calltypes # Legalize function arguments for argty in argtypes: if not valid_input(argty): return False # Legalize function return if not valid_output(restype): return False # Legalize all called functions for callty in calltypes.values(): if callty is not None and not valid_output(callty.return_type): return False # Ensure no allocation for fn in module.functions: if fn.name.startswith(""NRT_""): if fn.name not in _accepted_nrtfns: return False return True ","def _legalize(module, dmm, fndesc): """""" Legalize the code in the module. Returns True if the module is legal for the rewrite pass that remove unnecessary refcounts. """""" def valid_output(ty): """""" Valid output are any type that does not need refcount """""" model = dmm[ty] return not model.contains_nrt_meminfo() def valid_input(ty): """""" Valid input are any type that does not need refcount except Array. """""" return valid_output(ty) or isinstance(ty, types.Array) # Ensure no reference to function marked as # ""numba_args_may_always_need_nrt"" try: nmd = module.get_named_metadata(""numba_args_may_always_need_nrt"") except KeyError: # Nothing marked pass else: # Has functions marked as ""numba_args_may_always_need_nrt"" if len(nmd.operands) > 0: # The pass is illegal for this compilation unit. return False # More legalization base on function type argtypes = fndesc.argtypes restype = fndesc.restype calltypes = fndesc.calltypes # Legalize function arguments for argty in argtypes: if not valid_input(argty): return False # Legalize function return if not valid_output(restype): return False # Legalize all called functions for callty in calltypes.values(): if callty is not None and not valid_output(callty.return_type): return False # Ensure no allocation for fn in module.functions: if fn.name.startswith(""NRT_""): if fn.name not in _accepted_nrtfns: return False return True " 23612,"def singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts=None, method='lambertw'): r"""""" Solve the single-diode model to obtain a photovoltaic IV curve. Singlediode solves the single diode equation [1]_ .. math:: I = I_L - I_0 \left[ \exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1 \right] - \frac{V + I R_s}{R_{sh}} for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` which are described later. Returns a DataFrame which contains the 5 points on the I-V curve specified in SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` are scalar, a single curve will be returned, if any are Series (of the same length), multiple IV curves will be calculated. The input parameters can be calculated using :py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data. Parameters ---------- photocurrent : numeric Light-generated current :math:`I_L` (photocurrent) under desired IV curve conditions. ``0 <= photocurrent``. [A] saturation_current : numeric Diode saturation :math:`I_0` current under desired IV curve conditions. 
``0 < saturation_current``. [A] resistance_series : numeric Series resistance :math:`R_s` under desired IV curve conditions. ``0 <= resistance_series < numpy.inf``. [ohms] resistance_shunt : numeric Shunt resistance :math:`R_{sh}` under desired IV curve conditions. ``0 < resistance_shunt <= numpy.inf``. [ohms] nNsVth : numeric The product of three components. 1) The usual diode ideal factor :math:`n`, 2) the number of cells in series :math:`N_s`, and 3) the cell thermal voltage under the desired IV curve conditions :math:`V_{th}`. The thermal voltage of the cell (in volts) may be calculated as :math:`k_B T_c / q`, where :math:`k_B` is Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n junction in Kelvin, and :math:`q` is the charge of an electron (coulombs). ``0 < nNsVth``. [V] ivcurve_pnts : None or int, default None Number of points in the desired IV curve. If None or 0, no IV curves will be produced. method : str, default 'lambertw' Determines the method used to calculate points on the IV curve. The options are ``'lambertw'``, ``'newton'``, or ``'brentq'``. Returns ------- OrderedDict or DataFrame The returned dict-like object always contains the keys/columns: * i_sc - short circuit current in amperes. * v_oc - open circuit voltage in volts. * i_mp - current at maximum power point in amperes. * v_mp - voltage at maximum power point in volts. * p_mp - power at maximum power point in watts. * i_x - current, in amperes, at ``v = 0.5*v_oc``. * i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``. If ivcurve_pnts is greater than 0, the output dictionary will also include the keys: * i - IV curve current in amperes. * v - IV curve voltage in volts. The output will be an OrderedDict if photocurrent is a scalar, array, or ivcurve_pnts is not None. The output will be a DataFrame if photocurrent is a Series and ivcurve_pnts is None. Notes ----- If the method is ``'lambertw'`` then the solution employed to solve the implicit diode equation utilizes the Lambert W function to obtain an explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_. If the method is ``'newton'`` then the root-finding Newton-Raphson method is used. It should be safe for well behaved IV-curves, but the ``'brentq'`` method is recommended for reliability. If the method is ``'brentq'`` then Brent's bisection search method is used that guarantees convergence by bounding the voltage between zero and open-circuit. If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts`` are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to calculate the points on the IV curve points at diode voltages from zero to open-circuit voltage with a log spacing that gets closer as voltage increases. If the method is ``'lambertw'`` then the calculated points on the IV curve are linearly spaced. References ---------- .. [1] S.R. Wenham, M.A. Green, M.E. Watt, ""Applied Photovoltaics"" ISBN 0 86758 909 4 .. [2] A. Jain, A. Kapoor, ""Exact analytical solutions of the parameters of real solar cells using Lambert W-function"", Solar Energy Materials and Solar Cells, 81 (2004) 269-277. .. [3] D. King et al, ""Sandia Photovoltaic Array Performance Model"", SAND2004-3535, Sandia National Laboratories, Albuquerque, NM .. 
[4] ""Computer simulation of the effects of electrical mismatches in photovoltaic cell interconnection circuits"" JW Bishop, Solar Cell (1988) https://doi.org/10.1016/0379-6787(88)90059-2 See also -------- sapm calcparams_desoto pvlib.singlediode.bishop88 """""" # Calculate points on the IV curve using the LambertW solution to the # single diode equation if method.lower() == 'lambertw': out = _singlediode._lambertw( photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts ) i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7] if ivcurve_pnts: ivcurve_i, ivcurve_v = out[7:] else: # Calculate points on the IV curve using either 'newton' or 'brentq' # methods. Voltages are determined by first solving the single diode # equation for the diode voltage V_d then backing out voltage args = (photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth) # collect args v_oc = _singlediode.bishop88_v_from_i( 0.0, *args, method=method.lower() ) i_mp, v_mp, p_mp = _singlediode.bishop88_mpp( *args, method=method.lower() ) i_sc = _singlediode.bishop88_i_from_v( 0.0, *args, method=method.lower() ) i_x = _singlediode.bishop88_i_from_v( v_oc / 2.0, *args, method=method.lower() ) i_xx = _singlediode.bishop88_i_from_v( (v_oc + v_mp) / 2.0, *args, method=method.lower() ) # calculate the IV curve if requested using bishop88 if ivcurve_pnts: vd = v_oc * ( (11.0 - np.logspace(np.log10(11.0), 0.0, ivcurve_pnts)) / 10.0 ) ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args) out = OrderedDict() out['i_sc'] = i_sc out['v_oc'] = v_oc out['i_mp'] = i_mp out['v_mp'] = v_mp out['p_mp'] = p_mp out['i_x'] = i_x out['i_xx'] = i_xx if ivcurve_pnts: out['v'] = ivcurve_v out['i'] = ivcurve_i if isinstance(photocurrent, pd.Series) and not ivcurve_pnts: out = pd.DataFrame(out, index=photocurrent.index) return out ","def singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts=None, method='lambertw'): r"""""" Solve the single-diode model to obtain a photovoltaic IV curve. Singlediode solves the single diode equation [1]_ .. math:: I = I_L - I_0 \left[ \exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1 \right] - \frac{V + I R_s}{R_{sh}} for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` which are described later. Returns a DataFrame which contains the 5 points on the I-V curve specified in SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` are scalar, a single curve will be returned, if any are Series (of the same length), multiple IV curves will be calculated. The input parameters can be calculated using :py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data. Parameters ---------- photocurrent : numeric Light-generated current :math:`I_L` (photocurrent) under desired IV curve conditions. ``0 <= photocurrent``. [A] saturation_current : numeric Diode saturation :math:`I_0` current under desired IV curve conditions. ``0 < saturation_current``. [A] resistance_series : numeric Series resistance :math:`R_s` under desired IV curve conditions. ``0 <= resistance_series < numpy.inf``. [ohms] resistance_shunt : numeric Shunt resistance :math:`R_{sh}` under desired IV curve conditions. ``0 < resistance_shunt <= numpy.inf``. [ohms] nNsVth : numeric The product of three components. 1) The usual diode ideal factor :math:`n`, 2) the number of cells in series :math:`N_s`, and 3) the cell thermal voltage under the desired IV curve conditions :math:`V_{th}`. 
The thermal voltage of the cell (in volts) may be calculated as :math:`k_B T_c / q`, where :math:`k_B` is Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n junction in Kelvin, and :math:`q` is the charge of an electron (coulombs). ``0 < nNsVth``. [V] ivcurve_pnts : None or int, default None Number of points in the desired IV curve. If None or 0, no points on the IV curves will be produced. method : str, default 'lambertw' Determines the method used to calculate points on the IV curve. The options are ``'lambertw'``, ``'newton'``, or ``'brentq'``. Returns ------- OrderedDict or DataFrame The returned dict-like object always contains the keys/columns: * i_sc - short circuit current in amperes. * v_oc - open circuit voltage in volts. * i_mp - current at maximum power point in amperes. * v_mp - voltage at maximum power point in volts. * p_mp - power at maximum power point in watts. * i_x - current, in amperes, at ``v = 0.5*v_oc``. * i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``. If ivcurve_pnts is greater than 0, the output dictionary will also include the keys: * i - IV curve current in amperes. * v - IV curve voltage in volts. The output will be an OrderedDict if photocurrent is a scalar, array, or ivcurve_pnts is not None. The output will be a DataFrame if photocurrent is a Series and ivcurve_pnts is None. Notes ----- If the method is ``'lambertw'`` then the solution employed to solve the implicit diode equation utilizes the Lambert W function to obtain an explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_. If the method is ``'newton'`` then the root-finding Newton-Raphson method is used. It should be safe for well behaved IV-curves, but the ``'brentq'`` method is recommended for reliability. If the method is ``'brentq'`` then Brent's bisection search method is used that guarantees convergence by bounding the voltage between zero and open-circuit. If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts`` are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to calculate the points on the IV curve points at diode voltages from zero to open-circuit voltage with a log spacing that gets closer as voltage increases. If the method is ``'lambertw'`` then the calculated points on the IV curve are linearly spaced. References ---------- .. [1] S.R. Wenham, M.A. Green, M.E. Watt, ""Applied Photovoltaics"" ISBN 0 86758 909 4 .. [2] A. Jain, A. Kapoor, ""Exact analytical solutions of the parameters of real solar cells using Lambert W-function"", Solar Energy Materials and Solar Cells, 81 (2004) 269-277. .. [3] D. King et al, ""Sandia Photovoltaic Array Performance Model"", SAND2004-3535, Sandia National Laboratories, Albuquerque, NM .. [4] ""Computer simulation of the effects of electrical mismatches in photovoltaic cell interconnection circuits"" JW Bishop, Solar Cell (1988) https://doi.org/10.1016/0379-6787(88)90059-2 See also -------- sapm calcparams_desoto pvlib.singlediode.bishop88 """""" # Calculate points on the IV curve using the LambertW solution to the # single diode equation if method.lower() == 'lambertw': out = _singlediode._lambertw( photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts ) i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7] if ivcurve_pnts: ivcurve_i, ivcurve_v = out[7:] else: # Calculate points on the IV curve using either 'newton' or 'brentq' # methods. 
Voltages are determined by first solving the single diode # equation for the diode voltage V_d then backing out voltage args = (photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth) # collect args v_oc = _singlediode.bishop88_v_from_i( 0.0, *args, method=method.lower() ) i_mp, v_mp, p_mp = _singlediode.bishop88_mpp( *args, method=method.lower() ) i_sc = _singlediode.bishop88_i_from_v( 0.0, *args, method=method.lower() ) i_x = _singlediode.bishop88_i_from_v( v_oc / 2.0, *args, method=method.lower() ) i_xx = _singlediode.bishop88_i_from_v( (v_oc + v_mp) / 2.0, *args, method=method.lower() ) # calculate the IV curve if requested using bishop88 if ivcurve_pnts: vd = v_oc * ( (11.0 - np.logspace(np.log10(11.0), 0.0, ivcurve_pnts)) / 10.0 ) ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args) out = OrderedDict() out['i_sc'] = i_sc out['v_oc'] = v_oc out['i_mp'] = i_mp out['v_mp'] = v_mp out['p_mp'] = p_mp out['i_x'] = i_x out['i_xx'] = i_xx if ivcurve_pnts: out['v'] = ivcurve_v out['i'] = ivcurve_i if isinstance(photocurrent, pd.Series) and not ivcurve_pnts: out = pd.DataFrame(out, index=photocurrent.index) return out " 46183,"def test_reordering(): """""" Test indexing into a LayerList by name """""" layers = LayersList() layer_a = Image(np.random.random((10, 10))) layer_a.name = 'image_a' layer_b = Image(np.random.random((15, 15))) layer_b.name = 'image_b' layer_c = Image(np.random.random((15, 15))) layer_c.name = 'image_c' layers.append(layer_a) layers.append(layer_b) layers.append(layer_c) # Rearrange layers by tuple layers[:] = layers[(1, 0, 2)] assert [l for l in layers] == [layer_b, layer_a, layer_c] # Swap layers by name layers['image_b', 'image_c'] = layers['image_c', 'image_b'] assert [l for l in layers] == [layer_c, layer_a, layer_b] # Reverse layers layers.reverse() assert [l for l in layers] == [layer_b, layer_a, layer_c] ","def test_reordering(): """""" Test indexing into a LayerList by name """""" layers = LayersList() layer_a = Image(np.random.random((10, 10))) layer_a.name = 'image_a' layer_b = Image(np.random.random((15, 15))) layer_b.name = 'image_b' layer_c = Image(np.random.random((15, 15))) layer_c.name = 'image_c' layers.append(layer_a) layers.append(layer_b) layers.append(layer_c) # Rearrange layers by tuple layers[:] = layers[(1, 0, 2)] assert [l for l in layers] == [layer_b, layer_a, layer_c] # Swap layers by name layers['image_b', 'image_c'] = layers['image_c', 'image_b'] assert list(layers) == [layer_c, layer_a, layer_b] # Reverse layers layers.reverse() assert [l for l in layers] == [layer_b, layer_a, layer_c] " 33881,"def upload_ray_libraries_if_needed(runtime_env: Dict[str, Any], scratch_dir: str) -> Dict[str, Any]: """"""Uploads the libraries and replaces them with a dict of name->URI."""""" libraries = runtime_env.get(""ray_libraries"") if libraries is None: return runtime_env if isinstance(libraries, (str, ModuleType)): libraries = [libraries] elif not isinstance(libraries, list): raise TypeError(""ray_libraries must be a str, module, or list of strs "" ""and modules."") [ray_path] = ray.__path__ libraries_uris = {} for library in libraries: if isinstance(library, str): library_path = library elif isinstance(library, ModuleType): if len(library.__path__) > 1: raise ValueError(""ray_libraries only supports modules whose "" ""__path__ has length 1."") [library_path] = library.__path__ else: raise TypeError(""ray_libraries must be a list of file paths or "" f""imported modules, got {type(module)}."") if not 
library_path.startswith(""/""): relative_path = library_path elif not library_path.startswith(ray_path): raise ValueError(""ray_libraries entries must be a sub-directory "" f""of Ray, not {library_path}."") else: relative_path = os.path.relpath(library_path, ray_path) full_path = os.path.join(ray_path, relative_path) library_uri = get_uri_for_directory(full_path) upload_package_if_needed(library_uri, scratch_dir, full_path) libraries_uris[relative_path] = library_uri runtime_env[""ray_libraries""] = libraries_uris return runtime_env ","def upload_ray_libraries_if_needed(runtime_env: Dict[str, Any], scratch_dir: str) -> Dict[str, Any]: """"""Uploads the libraries and replaces them with a dict of name->URI."""""" libraries = runtime_env.get(""ray_libraries"") if libraries is None: return runtime_env if isinstance(libraries, (str, ModuleType)): libraries = [libraries] elif not isinstance(libraries, list): raise TypeError(""ray_libraries must be a str, module, or list of strs "" ""and modules."") [ray_path] = ray.__path__ libraries_uris = {} for library in libraries: if isinstance(library, str): library_path = library elif isinstance(library, ModuleType): if len(library.__path__) > 1: raise ValueError(""ray_libraries only supports modules whose "" ""__path__ has length 1."") [library_path] = library.__path__ else: raise TypeError(""ray_libraries must be a list of file paths or "" f""imported modules, got {type(library)}."") if not library_path.startswith(""/""): relative_path = library_path elif not library_path.startswith(ray_path): raise ValueError(""ray_libraries entries must be a sub-directory "" f""of Ray, not {library_path}."") else: relative_path = os.path.relpath(library_path, ray_path) full_path = os.path.join(ray_path, relative_path) library_uri = get_uri_for_directory(full_path) upload_package_if_needed(library_uri, scratch_dir, full_path) libraries_uris[relative_path] = library_uri runtime_env[""ray_libraries""] = libraries_uris return runtime_env " 8002,"def sample_external_source(n_samples=1, prn_seed=None): """"""Sample external source .. versionadded:: 0.13.1 Parameters ---------- n_samples : int Number of samples prn_seed : int PRNG seed; if None, one will be generated randomly Returns ------- list of openmc.SourceParticle List of samples source particles """""" if n_samples <= 0: raise ValueError(""Number of samples must be positive"") if prn_seed is None: prn_seed = getrandbits(63) # Call into C API to sample source sites_array = (_SourceSite * n_samples)() _dll.openmc_sample_external_source(c_size_t(n_samples), c_uint64(prn_seed), sites_array) # Convert to list of SourceParticle and return return [ openmc.SourceParticle( r=site.r, u=site.u, E=site.E, time=site.time, wgt=site.wgt, delayed_group=site.delayed_group, surf_id=site.surf_id, particle=openmc.ParticleType(site.particle) ) for site in sites_array ] ","def sample_external_source(n_samples=1, prn_seed=None): """"""Sample external source .. 
versionadded:: 0.13.1 Parameters ---------- n_samples : int Number of samples prn_seed : int Pseudorandom number generator (PRNG) seed; if None, one will be generated randomly Returns ------- list of openmc.SourceParticle List of samples source particles """""" if n_samples <= 0: raise ValueError(""Number of samples must be positive"") if prn_seed is None: prn_seed = getrandbits(63) # Call into C API to sample source sites_array = (_SourceSite * n_samples)() _dll.openmc_sample_external_source(c_size_t(n_samples), c_uint64(prn_seed), sites_array) # Convert to list of SourceParticle and return return [ openmc.SourceParticle( r=site.r, u=site.u, E=site.E, time=site.time, wgt=site.wgt, delayed_group=site.delayed_group, surf_id=site.surf_id, particle=openmc.ParticleType(site.particle) ) for site in sites_array ] " 53214,"def test_multiple(aggregator): """""" Test when a domain is attached to 3 IPs [UP, DOWN, UP] """""" instance = deepcopy(common.INSTANCE_MULTIPLE) instance['name'] = 'multiple' instance['ip_cache_duration'] = 0 check = TCPCheck(common.CHECK_NAME, {}, [instance]) with mock.patch('socket.gethostbyname_ex', return_value=[None, None, ['ip1', 'ip2', 'ip3']]), mock.patch.object( check, 'connect', wraps=check.connect ) as connect: connect.side_effect = [None, Exception(), None] * 2 expected_tags = ['foo:bar', 'target_host:datadoghq.com', 'port:80', 'instance:multiple'] # Running the check twice check.check(None) check.check(None) aggregator.assert_metric('network.tcp.can_connect', value=1, tags=expected_tags + ['address:ip1'], count=2) aggregator.assert_metric('network.tcp.can_connect', value=0, tags=expected_tags + ['address:ip2'], count=2) aggregator.assert_metric('network.tcp.can_connect', value=1, tags=expected_tags + ['address:ip3'], count=2) aggregator.assert_service_check('tcp.can_connect', status=check.OK, count=4) aggregator.assert_service_check('tcp.can_connect', status=check.CRITICAL, count=2) aggregator.assert_all_metrics_covered() assert len(aggregator.service_checks('tcp.can_connect')) == 6 ","def test_multiple(aggregator): """""" Test when a domain is attached to 3 IPs [UP, DOWN, UP] """""" instance = deepcopy(common.INSTANCE_MULTIPLE) instance['name'] = 'multiple' instance['ip_cache_duration'] = 0 check = TCPCheck(common.CHECK_NAME, {}, [instance]) with mock.patch('socket.gethostbyname_ex', return_value=[None, None, ['ip1', 'ip2', 'ip3']]), mock.patch.object( check, 'connect', wraps=check.connect ) as connect: connect.side_effect = [None, Exception(), None] * 2 expected_tags = ['foo:bar', 'target_host:datadoghq.com', 'port:80', 'instance:multiple'] # Running the check twice dd_run_check(check) dd_run_check(check) aggregator.assert_metric('network.tcp.can_connect', value=1, tags=expected_tags + ['address:ip1'], count=2) aggregator.assert_metric('network.tcp.can_connect', value=0, tags=expected_tags + ['address:ip2'], count=2) aggregator.assert_metric('network.tcp.can_connect', value=1, tags=expected_tags + ['address:ip3'], count=2) aggregator.assert_service_check('tcp.can_connect', status=check.OK, count=4) aggregator.assert_service_check('tcp.can_connect', status=check.CRITICAL, count=2) aggregator.assert_all_metrics_covered() assert len(aggregator.service_checks('tcp.can_connect')) == 6 " 8987,"def search_lazy(*loaders): """"""Decorate a callable as a search rule with lazy loading. :param loaders: one or more functions to generate a list of **compiled** regexes to match patterns in a line. 
:type loaders: :term:`function` Each ``loader`` function must accept a ``settings`` parameter and return a list (or tuple) of **compiled** regular expressions:: import re def loader(settings): return [re.compile(r'')] It will be called by Sopel when the bot parses the plugin to register the search rules to get its regexes. The ``settings`` argument will be the bot's :class:`sopel.config.Config` object. If any of the ``loader`` functions raises a :exc:`~sopel.plugins.exceptions.PluginError` exception, the find rule will be ignored; it will not fail the plugin's loading. The decorated function will behave like any other :func:`callable`:: from sopel import plugin @plugin.search_lazy(loader) def my_search_rule_handler(bot, trigger): bot.say('Rule triggered by: %s' % trigger.group(0)) .. versionadded:: 7.1 .. seealso:: When more than one loader is provided, they will be chained together with the :func:`sopel.tools.chain_loaders` function. """""" def decorator(function): function._sopel_callable = True if not hasattr(function, 'search_rules_lazy_loaders'): function.search_rules_lazy_loaders = [] function.search_rules_lazy_loaders.extend(loaders) return function return decorator ","def search_lazy(*loaders): """"""Decorate a callable as a search rule with lazy loading. :param loaders: one or more functions to generate a list of **compiled** regexes to match patterns in a line :type loaders: :term:`function` Each ``loader`` function must accept a ``settings`` parameter and return a list (or tuple) of **compiled** regular expressions:: import re def loader(settings): return [re.compile(r'')] It will be called by Sopel when the bot parses the plugin to register the search rules to get its regexes. The ``settings`` argument will be the bot's :class:`sopel.config.Config` object. If any of the ``loader`` functions raises a :exc:`~sopel.plugins.exceptions.PluginError` exception, the find rule will be ignored; it will not fail the plugin's loading. The decorated function will behave like any other :func:`callable`:: from sopel import plugin @plugin.search_lazy(loader) def my_search_rule_handler(bot, trigger): bot.say('Rule triggered by: %s' % trigger.group(0)) .. versionadded:: 7.1 .. seealso:: When more than one loader is provided, they will be chained together with the :func:`sopel.tools.chain_loaders` function. 
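As a further sketch (the ``search_terms`` attribute below is hypothetical and only illustrates the idea), a loader usually derives its patterns from ``settings``, which is exactly why the regexes cannot be compiled at import time and must be built lazily::

    import re

    def loader(settings):
        # Hypothetical config value; fall back to a fixed word if it is unset.
        words = getattr(settings, 'search_terms', None) or ['sopel']
        # Return compiled patterns, as the lazy-loader contract requires.
        return [re.compile(re.escape(word), re.IGNORECASE) for word in words]
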
"""""" def decorator(function): function._sopel_callable = True if not hasattr(function, 'search_rules_lazy_loaders'): function.search_rules_lazy_loaders = [] function.search_rules_lazy_loaders.extend(loaders) return function return decorator " 14854,"def setup_platform(hass, config, add_devices, discovery_info=None): """"""Set up the binary sensor platform"""""" add_devices([Stookalert(config)], update_before_add=True) ","def setup_platform(hass, config, add_devices, discovery_info=None): """"""Set up the Stookalert binary sensor platform."""""" add_devices([Stookalert(config)], update_before_add=True) " 52149,"def configure_qdevice_interactive(): """""" Configure qdevice on interactive mode """""" if _context.yes_to_all or not confirm(""Do you want to configure QDevice?""): return qnetd_addr = prompt_for_string(""HOST or IP of the QNetd server to be used"") if not qnetd_addr: error(""Address of QNetd is required"") qdevice_port = prompt_for_string(""TCP PORT of QNetd server"", default=5403) qdevice_algo = prompt_for_string(""QNetd decision ALGORITHM(ffsplit/lms)"", default=""ffsplit"") qdevice_tie_breaker = prompt_for_string(""QNetd TIE_BREAKER(lowest/highest/valid node id)"", default=""lowest"") qdevice_tls = prompt_for_string(""Whether using TLS on QDevice/QNetd(on/off/required)"", default=""on"") qdevice_heuristics = prompt_for_string(""COMMAND to run with absolute path; For multiple commands, use \"";\"" to separate"") qdevice_heuristics_mode = prompt_for_string(""MODE of operation of heuristics(on/sync/off)"", default=""sync"") if qdevice_heuristics else None _context.qdevice_inst = corosync.QDevice( qnetd_addr, port=qdevice_port, algo=qdevice_algo, tie_breaker=qdevice_tie_breaker, tls=qdevice_tls, cmds=qdevice_heuristics, mode=qdevice_heuristics_mode) _context.qdevice_inst.valid_attr() ","def configure_qdevice_interactive(): """""" Configure qdevice on interactive mode """""" if _context.yes_to_all or not confirm(""Do you want to configure QDevice?""): return qnetd_addr = prompt_for_string(""HOST or IP of the QNetd server to be used"") if not qnetd_addr: error(""Address of QNetd is required"") qdevice_port = prompt_for_string(""TCP PORT of QNetd server"", default=5403) qdevice_algo = prompt_for_string(""QNetd decision ALGORITHM (ffsplit/lms)"", default=""ffsplit"") qdevice_tie_breaker = prompt_for_string(""QNetd TIE_BREAKER (lowest/highest/valid node id)"", default=""lowest"") qdevice_tls = prompt_for_string(""Whether using TLS on QDevice/QNetd (on/off/required)"", default=""on"") qdevice_heuristics = prompt_for_string(""COMMAND to run with absolute path; For multiple commands, use \"";\"" to separate"") qdevice_heuristics_mode = prompt_for_string(""MODE of operation of heuristics (on/sync/off)"", default=""sync"") if qdevice_heuristics else None _context.qdevice_inst = corosync.QDevice( qnetd_addr, port=qdevice_port, algo=qdevice_algo, tie_breaker=qdevice_tie_breaker, tls=qdevice_tls, cmds=qdevice_heuristics, mode=qdevice_heuristics_mode) _context.qdevice_inst.valid_attr() " 28590,"def plot_bpv( data, kind=""u_value"", t_stat=""median"", bpv=True, plot_mean=True, reference=""analytical"", mse=False, n_ref=100, hdi_prob=0.94, color=""C0"", grid=None, figsize=None, textsize=None, labeller=None, data_pairs=None, var_names=None, filter_vars=None, coords=None, flatten=None, flatten_pp=None, ax=None, backend=None, plot_ref_kwargs=None, backend_kwargs=None, group=""posterior"", show=None, ): """""" Plot Bayesian p-value for observed data and Posterior/Prior predictive. 
Parameters ---------- data : az.InferenceData object :class:`arviz.InferenceData` object containing the observed and posterior/prior predictive data. kind : str Type of plot to display (""p_value"", ""u_value"", ""t_stat""). Defaults to u_value. For ""p_value"" we compute p := p(y* ≤ y | y). This is the probability of the data y being larger or equal than the predicted data y*. The ideal value is 0.5 (half the predictions below and half above the data). For ""u_value"" we compute pi := p(yi* ≤ yi | y). i.e. like a p_value but per observation yi. This is also known as marginal p_value. The ideal distribution is uniform. This is similar to the LOO-pit calculation/plot, the difference is than in LOO-pit plot we compute pi = p(yi* r ≤ yi | y-i ), where y-i, is all other data except yi. For ""t_stat"" we compute := p(T(y)* ≤ T(y) | y) where T is any T statistic. See t_stat argument below for details of available options. t_stat : str, float, or callable T statistics to compute from the observations and predictive distributions. Allowed strings are ""mean"", ""median"" or ""std"". Defaults to ""median"". Alternative a quantile can be passed as a float (or str) in the interval (0, 1). Finally a user defined function is also acepted, see examples section for details. bpv : bool If True (default) add the bayesian p_value to the legend when ``kind = t_stat``. plot_mean : bool Whether or not to plot the mean T statistic. Defaults to True. reference : str How to compute the distributions used as reference for u_values or p_values. Allowed values are ""analytical"" (default) and ""samples"". Use `None` to do not plot any reference. Defaults to ""samples"". mse :bool Show scaled mean square error between uniform distribution and marginal p_value distribution. Defaults to False. n_ref : int, optional Number of reference distributions to sample when ``reference=samples``. Defaults to 100. hdi_prob: float, optional Probability for the highest density interval for the analytical reference distribution when computing u_values. Should be in the interval (0, 1]. Defaults to 0.94. color : str Matplotlib color grid : tuple Number of rows and columns. Defaults to None, the rows and columns are automatically inferred. figsize : tuple Figure size. If None it will be defined automatically. textsize : float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. data_pairs : dict Dictionary containing relations between observed data and posterior/prior predictive data. Dictionary structure: - key = data var_name - value = posterior/prior predictive var_name For example, ``data_pairs = {'y' : 'y_hat'}`` If None, it will assume that the observed data and the posterior/prior predictive data have the same variable name. labeller : labeller instance, optional Class providing the method ``make_pp_label`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. var_names : list of variable names Variables to be plotted, if `None` all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars : {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. 
coords : dict Dictionary mapping dimensions to selected coordinates to be plotted. Dimensions without a mapping specified will include all coordinates for that dimension. Defaults to including all coordinates for all dimensions if None. flatten : list List of dimensions to flatten in observed_data. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. flatten_pp : list List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs parameters. If flatten is defined and flatten_pp is None, then ``flatten_pp=flatten``. legend : bool Add legend to figure. By default True. ax : numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). backend : str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". plot_ref_kwargs : dict, optional Extra keyword arguments to control how reference is represented. Passed to :meth:`matplotlib.axes.Axes.plot` or :meth:`matplotlib.axes.Axes.axhspan` (when ``kind=u_value`` and ``reference=analytical``). backend_kwargs : bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. group : {""prior"", ""posterior""}, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. Other value can be 'prior'. show : bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_ppc : Plot for posterior/prior predictive checks. plot_loo_pit : Plot Leave-One-Out probability integral transformation (PIT) predictive checks. plot_dist_comparison : Plot to compare fitted and unfitted distributions. References ---------- * Gelman et al. (2013) see http://www.stat.columbia.edu/~gelman/book/ pages 151-153 for details Examples -------- Plot Bayesian p_values. .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data(""regression1d"") >>> az.plot_bpv(data, kind=""p_value"") Plot custom T statistic comparison. .. 
plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data(""regression1d"") >>> az.plot_bpv(data, kind=""t_stat"", t_stat=lambda x:np.percentile(x, q=50, axis=-1)) """""" if group not in (""posterior"", ""prior""): raise TypeError(""`group` argument must be either `posterior` or `prior`"") for groups in (f""{group}_predictive"", ""observed_data""): if not hasattr(data, groups): raise TypeError(f'`data` argument must have the group ""{groups}""') if kind.lower() not in (""t_stat"", ""u_value"", ""p_value""): raise TypeError(""`kind` argument must be either `t_stat`, `u_value`, or `p_value`"") if reference is not None: if reference.lower() not in (""analytical"", ""samples""): raise TypeError( ""`reference` argument must be either `analytical`, `samples`, or `None`"" ) if hdi_prob is None: hdi_prob = rcParams[""stats.hdi_prob""] else: if not 1 >= hdi_prob > 0: raise ValueError(""The value of hdi_prob should be in the interval (0, 1]"") if data_pairs is None: data_pairs = {} if labeller is None: labeller = BaseLabeller() if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() observed = data.observed_data if group == ""posterior"": predictive_dataset = data.posterior_predictive elif group == ""prior"": predictive_dataset = data.prior_predictive if var_names is None: var_names = list(observed.data_vars) var_names = _var_names(var_names, observed, filter_vars) pp_var_names = [data_pairs.get(var, var) for var in var_names] pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars) if flatten_pp is None and flatten is None: flatten_pp = list(predictive_dataset.dims.keys()) elif flatten_pp is None: flatten_pp = flatten if flatten is None: flatten = list(observed.dims.keys()) if coords is None: coords = {} total_pp_samples = predictive_dataset.sizes[""chain""] * predictive_dataset.sizes[""draw""] for key in coords.keys(): coords[key] = np.where(np.in1d(observed[key], coords[key]))[0] obs_plotters = filter_plotters_list( list( xarray_var_iter( observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True ) ), ""plot_t_stats"", ) length_plotters = len(obs_plotters) pp_plotters = [ tup for _, tup in zip( range(length_plotters), xarray_var_iter( predictive_dataset.isel(coords), var_names=pp_var_names, skip_dims=set(flatten_pp), combined=True, ), ) ] rows, cols = default_grid(length_plotters, grid=grid) bpvplot_kwargs = dict( ax=ax, length_plotters=length_plotters, rows=rows, cols=cols, obs_plotters=obs_plotters, pp_plotters=pp_plotters, total_pp_samples=total_pp_samples, kind=kind, bpv=bpv, t_stat=t_stat, reference=reference, mse=mse, n_ref=n_ref, hdi_prob=hdi_prob, plot_mean=plot_mean, color=color, figsize=figsize, textsize=textsize, labeller=labeller, plot_ref_kwargs=plot_ref_kwargs, backend_kwargs=backend_kwargs, show=show, ) # TODO: Add backend kwargs plot = get_plotting_function(""plot_bpv"", ""bpvplot"", backend) axes = plot(**bpvplot_kwargs) return axes ","def plot_bpv( data, kind=""u_value"", t_stat=""median"", bpv=True, plot_mean=True, reference=""analytical"", mse=False, n_ref=100, hdi_prob=0.94, color=""C0"", grid=None, figsize=None, textsize=None, labeller=None, data_pairs=None, var_names=None, filter_vars=None, coords=None, flatten=None, flatten_pp=None, ax=None, backend=None, plot_ref_kwargs=None, backend_kwargs=None, group=""posterior"", show=None, ): """""" Plot Bayesian p-value for observed data and Posterior/Prior predictive. 
Parameters ---------- data : az.InferenceData object :class:`arviz.InferenceData` object containing the observed and posterior/prior predictive data. kind : str Type of plot to display (""p_value"", ""u_value"", ""t_stat""). Defaults to u_value. For ""p_value"" we compute p := p(y* ≤ y | y). This is the probability of the data y being larger or equal than the predicted data y*. The ideal value is 0.5 (half the predictions below and half above the data). For ""u_value"" we compute pi := p(yi* ≤ yi | y). i.e. like a p_value but per observation yi. This is also known as marginal p_value. The ideal distribution is uniform. This is similar to the LOO-pit calculation/plot, the difference is than in LOO-pit plot we compute pi = p(yi* r ≤ yi | y-i ), where y-i, is all other data except yi. For ""t_stat"" we compute := p(T(y)* ≤ T(y) | y) where T is any T statistic. See t_stat argument below for details of available options. t_stat : str, float, or callable T statistics to compute from the observations and predictive distributions. Allowed strings are ""mean"", ""median"" or ""std"". Defaults to ""median"". Alternative a quantile can be passed as a float (or str) in the interval (0, 1). Finally a user defined function is also acepted, see examples section for details. bpv : bool If True (default) add the bayesian p_value to the legend when ``kind = t_stat``. plot_mean : bool Whether or not to plot the mean test statistic. Defaults to True. reference : str How to compute the distributions used as reference for u_values or p_values. Allowed values are ""analytical"" (default) and ""samples"". Use `None` to do not plot any reference. Defaults to ""samples"". mse :bool Show scaled mean square error between uniform distribution and marginal p_value distribution. Defaults to False. n_ref : int, optional Number of reference distributions to sample when ``reference=samples``. Defaults to 100. hdi_prob: float, optional Probability for the highest density interval for the analytical reference distribution when computing u_values. Should be in the interval (0, 1]. Defaults to 0.94. color : str Matplotlib color grid : tuple Number of rows and columns. Defaults to None, the rows and columns are automatically inferred. figsize : tuple Figure size. If None it will be defined automatically. textsize : float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. data_pairs : dict Dictionary containing relations between observed data and posterior/prior predictive data. Dictionary structure: - key = data var_name - value = posterior/prior predictive var_name For example, ``data_pairs = {'y' : 'y_hat'}`` If None, it will assume that the observed data and the posterior/prior predictive data have the same variable name. labeller : labeller instance, optional Class providing the method ``make_pp_label`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. var_names : list of variable names Variables to be plotted, if `None` all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars : {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. 
coords : dict Dictionary mapping dimensions to selected coordinates to be plotted. Dimensions without a mapping specified will include all coordinates for that dimension. Defaults to including all coordinates for all dimensions if None. flatten : list List of dimensions to flatten in observed_data. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. flatten_pp : list List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs parameters. If flatten is defined and flatten_pp is None, then ``flatten_pp=flatten``. legend : bool Add legend to figure. By default True. ax : numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). backend : str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". plot_ref_kwargs : dict, optional Extra keyword arguments to control how reference is represented. Passed to :meth:`matplotlib.axes.Axes.plot` or :meth:`matplotlib.axes.Axes.axhspan` (when ``kind=u_value`` and ``reference=analytical``). backend_kwargs : bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. group : {""prior"", ""posterior""}, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. Other value can be 'prior'. show : bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_ppc : Plot for posterior/prior predictive checks. plot_loo_pit : Plot Leave-One-Out probability integral transformation (PIT) predictive checks. plot_dist_comparison : Plot to compare fitted and unfitted distributions. References ---------- * Gelman et al. (2013) see http://www.stat.columbia.edu/~gelman/book/ pages 151-153 for details Examples -------- Plot Bayesian p_values. .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data(""regression1d"") >>> az.plot_bpv(data, kind=""p_value"") Plot custom T statistic comparison. .. 
plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data(""regression1d"") >>> az.plot_bpv(data, kind=""t_stat"", t_stat=lambda x:np.percentile(x, q=50, axis=-1)) """""" if group not in (""posterior"", ""prior""): raise TypeError(""`group` argument must be either `posterior` or `prior`"") for groups in (f""{group}_predictive"", ""observed_data""): if not hasattr(data, groups): raise TypeError(f'`data` argument must have the group ""{groups}""') if kind.lower() not in (""t_stat"", ""u_value"", ""p_value""): raise TypeError(""`kind` argument must be either `t_stat`, `u_value`, or `p_value`"") if reference is not None: if reference.lower() not in (""analytical"", ""samples""): raise TypeError( ""`reference` argument must be either `analytical`, `samples`, or `None`"" ) if hdi_prob is None: hdi_prob = rcParams[""stats.hdi_prob""] else: if not 1 >= hdi_prob > 0: raise ValueError(""The value of hdi_prob should be in the interval (0, 1]"") if data_pairs is None: data_pairs = {} if labeller is None: labeller = BaseLabeller() if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() observed = data.observed_data if group == ""posterior"": predictive_dataset = data.posterior_predictive elif group == ""prior"": predictive_dataset = data.prior_predictive if var_names is None: var_names = list(observed.data_vars) var_names = _var_names(var_names, observed, filter_vars) pp_var_names = [data_pairs.get(var, var) for var in var_names] pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars) if flatten_pp is None and flatten is None: flatten_pp = list(predictive_dataset.dims.keys()) elif flatten_pp is None: flatten_pp = flatten if flatten is None: flatten = list(observed.dims.keys()) if coords is None: coords = {} total_pp_samples = predictive_dataset.sizes[""chain""] * predictive_dataset.sizes[""draw""] for key in coords.keys(): coords[key] = np.where(np.in1d(observed[key], coords[key]))[0] obs_plotters = filter_plotters_list( list( xarray_var_iter( observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True ) ), ""plot_t_stats"", ) length_plotters = len(obs_plotters) pp_plotters = [ tup for _, tup in zip( range(length_plotters), xarray_var_iter( predictive_dataset.isel(coords), var_names=pp_var_names, skip_dims=set(flatten_pp), combined=True, ), ) ] rows, cols = default_grid(length_plotters, grid=grid) bpvplot_kwargs = dict( ax=ax, length_plotters=length_plotters, rows=rows, cols=cols, obs_plotters=obs_plotters, pp_plotters=pp_plotters, total_pp_samples=total_pp_samples, kind=kind, bpv=bpv, t_stat=t_stat, reference=reference, mse=mse, n_ref=n_ref, hdi_prob=hdi_prob, plot_mean=plot_mean, color=color, figsize=figsize, textsize=textsize, labeller=labeller, plot_ref_kwargs=plot_ref_kwargs, backend_kwargs=backend_kwargs, show=show, ) # TODO: Add backend kwargs plot = get_plotting_function(""plot_bpv"", ""bpvplot"", backend) axes = plot(**bpvplot_kwargs) return axes " 43260,"def main(): """"""Bandit CLI."""""" # bring our logging stuff up as early as possible debug = ( logging.DEBUG if ""-d"" in sys.argv or ""--debug"" in sys.argv else logging.INFO ) _init_logger(debug) extension_mgr = _init_extensions() baseline_formatters = [ f.name for f in filter( lambda x: hasattr(x.plugin, ""_accepts_baseline""), extension_mgr.formatters, ) ] # now do normal startup parser = argparse.ArgumentParser( description=""Bandit - a Python source code security analyzer"", formatter_class=argparse.RawDescriptionHelpFormatter, ) 
parser.add_argument( ""targets"", metavar=""targets"", type=str, nargs=""*"", help=""source file(s) or directory(s) to be tested"", ) parser.add_argument( ""-r"", ""--recursive"", dest=""recursive"", action=""store_true"", help=""find and process files in subdirectories"", ) parser.add_argument( ""-a"", ""--aggregate"", dest=""agg_type"", action=""store"", default=""file"", type=str, choices=[""file"", ""vuln""], help=""aggregate output by vulnerability (default) or by filename"", ) parser.add_argument( ""-n"", ""--number"", dest=""context_lines"", action=""store"", default=3, type=int, help=""maximum number of code lines to output for each issue"", ) parser.add_argument( ""-c"", ""--configfile"", dest=""config_file"", action=""store"", default=None, type=str, help=""optional config file to use for selecting plugins and "" ""overriding defaults"", ) parser.add_argument( ""-p"", ""--profile"", dest=""profile"", action=""store"", default=None, type=str, help=""profile to use (defaults to executing all tests)"", ) parser.add_argument( ""-t"", ""--tests"", dest=""tests"", action=""store"", default=None, type=str, help=""comma-separated list of test IDs to run"", ) parser.add_argument( ""-s"", ""--skip"", dest=""skips"", action=""store"", default=None, type=str, help=""comma-separated list of test IDs to skip"", ) severity_group = parser.add_mutually_exclusive_group(required=False) severity_group.add_argument( ""-l"", ""--level"", dest=""severity"", action=""count"", default=1, help=""report only issues of a given severity level or "" ""higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)"", ) severity_group.add_argument( ""--severity-level"", dest=""severity_string"", action=""store"", help=""report only issues of a given severity level or higher."" ' ""all"" and ""low"" are likely to produce the same results, but it' "" is possible for rules to be undefined which will"" ' not be listed in ""low"".', choices=[""all"", ""low"", ""medium"", ""high""], ) confidence_group = parser.add_mutually_exclusive_group(required=False) confidence_group.add_argument( ""-i"", ""--confidence"", dest=""confidence"", action=""count"", default=1, help=""report only issues of a given confidence level or "" ""higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)"", ) confidence_group.add_argument( ""--confidence-level"", dest=""confidence_string"", action=""store"", help=""report only issues of a given confidence level or higher."" ' ""all"" and ""low"" are likely to produce the same results, but it' "" is possible for rules to be undefined which will"" ' not be listed in ""low"".', choices=[""all"", ""low"", ""medium"", ""high""], ) output_format = ( ""screen"" if ( sys.stdout.isatty() and os.getenv(""NO_COLOR"") is None and os.getenv(""TERM"") != ""dumb"" ) else ""txt"" ) parser.add_argument( ""-f"", ""--format"", dest=""output_format"", action=""store"", default=output_format, help=""specify output format"", choices=sorted(extension_mgr.formatter_names), ) parser.add_argument( ""--msg-template"", action=""store"", default=None, help=""specify output message template"" "" (only usable with --format custom),"" "" see CUSTOM FORMAT section"" "" for list of available values"", ) parser.add_argument( ""-o"", ""--output"", dest=""output_file"", action=""store"", nargs=""?"", type=argparse.FileType(""w"", encoding=""utf-8""), default=sys.stdout, help=""write report to filename"", ) group = parser.add_mutually_exclusive_group(required=False) group.add_argument( ""-v"", ""--verbose"", dest=""verbose"", action=""store_true"", 
help=""output extra information like excluded and included files"", ) parser.add_argument( ""-d"", ""--debug"", dest=""debug"", action=""store_true"", help=""turn on debug mode"", ) group.add_argument( ""-q"", ""--quiet"", ""--silent"", dest=""quiet"", action=""store_true"", help=""only show output in the case of an error"", ) parser.add_argument( ""--ignore-nosec"", dest=""ignore_nosec"", action=""store_true"", help=""do not skip lines with # nosec comments"", ) parser.add_argument( ""-x"", ""--exclude"", dest=""excluded_paths"", action=""store"", default="","".join(constants.EXCLUDE), help=""comma-separated list of paths (glob patterns "" ""supported) to exclude from scan "" ""(note that these are in addition to the excluded "" ""paths provided in the config file) (default: "" + "","".join(constants.EXCLUDE) + "")"", ) parser.add_argument( ""-b"", ""--baseline"", dest=""baseline"", action=""store"", default=None, help=""path of a baseline report to compare against "" ""(only JSON-formatted files are accepted)"", ) parser.add_argument( ""--ini"", dest=""ini_path"", action=""store"", default=None, help=""path to a .bandit file that supplies command line arguments"", ) parser.add_argument( ""--exit-zero"", action=""store_true"", dest=""exit_zero"", default=False, help=""exit with 0, "" ""even with results found"", ) python_ver = sys.version.replace(""\n"", """") parser.add_argument( ""--version"", action=""version"", version=""%(prog)s {version}\n python version = {python}"".format( version=bandit.__version__, python=python_ver ), ) parser.set_defaults(debug=False) parser.set_defaults(verbose=False) parser.set_defaults(quiet=False) parser.set_defaults(ignore_nosec=False) plugin_info = [ f""{a[0]}\t{a[1].name}"" for a in extension_mgr.plugins_by_id.items() ] blacklist_info = [] for a in extension_mgr.blacklist.items(): for b in a[1]: blacklist_info.append(""{}\t{}"".format(b[""id""], b[""name""])) plugin_list = ""\n\t"".join(sorted(set(plugin_info + blacklist_info))) dedent_text = textwrap.dedent( """""" CUSTOM FORMATTING ----------------- Available tags: {abspath}, {relpath}, {line}, {col}, {test_id}, {severity}, {msg}, {confidence}, {range} Example usage: Default template: bandit -r examples/ --format custom --msg-template \\ ""{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"" Provides same output as: bandit -r examples/ --format custom Tags can also be formatted in python string.format() style: bandit -r examples/ --format custom --msg-template \\ ""{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"" See python documentation for more information about formatting style: https://docs.python.org/3/library/string.html The following tests were discovered and loaded: ----------------------------------------------- """""" ) parser.epilog = dedent_text + f""\t{plugin_list}"" # setup work - parse arguments, and initialize BanditManager args = parser.parse_args() # Check if `--msg-template` is not present without custom formatter if args.output_format != ""custom"" and args.msg_template is not None: parser.error(""--msg-template can only be used with --format=custom"") # Check if confidence or severity level have been specified with strings if args.severity_string is not None: if args.severity_string == ""all"": args.severity = 1 elif args.severity_string == ""low"": args.severity = 2 elif args.severity_string == ""medium"": args.severity = 3 elif args.severity_string == ""high"": args.severity = 4 # Other strings will be blocked by argparse if args.confidence_string is not None: 
if args.confidence_string == ""all"": args.confidence = 1 elif args.confidence_string == ""low"": args.confidence = 2 elif args.confidence_string == ""medium"": args.confidence = 3 elif args.confidence_string == ""high"": args.confidence = 4 # Other strings will be blocked by argparse try: b_conf = b_config.BanditConfig(config_file=args.config_file) except utils.ConfigError as e: LOG.error(e) sys.exit(2) # Handle .bandit files in projects to pass cmdline args from file ini_options = _get_options_from_ini(args.ini_path, args.targets) if ini_options: # prefer command line, then ini file args.excluded_paths = _log_option_source( parser.get_default(""excluded_paths""), args.excluded_paths, ini_options.get(""exclude""), ""excluded paths"", ) args.skips = _log_option_source( parser.get_default(""skips""), args.skips, ini_options.get(""skips""), ""skipped tests"", ) args.tests = _log_option_source( parser.get_default(""tests""), args.tests, ini_options.get(""tests""), ""selected tests"", ) ini_targets = ini_options.get(""targets"") if ini_targets: ini_targets = ini_targets.split("","") args.targets = _log_option_source( parser.get_default(""targets""), args.targets, ini_targets, ""selected targets"", ) # TODO(tmcpeak): any other useful options to pass from .bandit? args.recursive = _log_option_source( parser.get_default(""recursive""), args.recursive, ini_options.get(""recursive""), ""recursive scan"", ) args.agg_type = _log_option_source( parser.get_default(""agg_type""), args.agg_type, ini_options.get(""aggregate""), ""aggregate output type"", ) args.context_lines = _log_option_source( parser.get_default(""context_lines""), args.context_lines, (None if ini_options.get(""number"") is None else int(ini_options.get(""number""))), ""max code lines output for issue"", ) args.profile = _log_option_source( parser.get_default(""profile""), args.profile, ini_options.get(""profile""), ""profile"", ) args.severity = _log_option_source( parser.get_default(""severity""), args.severity, ini_options.get(""level""), ""severity level"", ) args.confidence = _log_option_source( parser.get_default(""confidence""), args.confidence, ini_options.get(""confidence""), ""confidence level"", ) args.output_format = _log_option_source( parser.get_default(""output_format""), args.output_format, ini_options.get(""format""), ""output format"", ) args.msg_template = _log_option_source( parser.get_default(""msg_template""), args.msg_template, ini_options.get(""msg-template""), ""output message template"", ) args.output_file = _log_option_source( parser.get_default(""output_file""), args.output_file, ini_options.get(""output""), ""output file"", ) args.verbose = _log_option_source( parser.get_default(""verbose""), args.verbose, ini_options.get(""verbose""), ""output extra information"", ) args.debug = _log_option_source( parser.get_default(""debug""), args.debug, ini_options.get(""debug""), ""debug mode"", ) args.quiet = _log_option_source( parser.get_default(""quiet""), args.quiet, ini_options.get(""quiet""), ""silent mode"", ) args.ignore_nosec = _log_option_source( parser.get_default(""ignore_nosec""), args.ignore_nosec, ini_options.get(""ignore-nosec""), ""do not skip lines with # nosec"", ) args.baseline = _log_option_source( parser.get_default(""baseline""), args.baseline, ini_options.get(""baseline""), ""path of a baseline report"", ) if not args.targets: parser.print_usage() sys.exit(2) # if the log format string was set in the options, reinitialize if b_conf.get_option(""log_format""): log_format = 
b_conf.get_option(""log_format"") _init_logger(log_level=logging.DEBUG, log_format=log_format) if args.quiet: _init_logger(log_level=logging.WARN) try: profile = _get_profile(b_conf, args.profile, args.config_file) _log_info(args, profile) profile[""include""].update(args.tests.split("","") if args.tests else []) profile[""exclude""].update(args.skips.split("","") if args.skips else []) extension_mgr.validate_profile(profile) except (utils.ProfileNotFound, ValueError) as e: LOG.error(e) sys.exit(2) b_mgr = b_manager.BanditManager( b_conf, args.agg_type, args.debug, profile=profile, verbose=args.verbose, quiet=args.quiet, ignore_nosec=args.ignore_nosec, ) if args.baseline is not None: try: with open(args.baseline) as bl: data = bl.read() b_mgr.populate_baseline(data) except OSError: LOG.warning(""Could not open baseline report: %s"", args.baseline) sys.exit(2) if args.output_format not in baseline_formatters: LOG.warning( ""Baseline must be used with one of the following "" ""formats: "" + str(baseline_formatters) ) sys.exit(2) if args.output_format != ""json"": if args.config_file: LOG.info(""using config: %s"", args.config_file) LOG.info( ""running on Python %d.%d.%d"", sys.version_info.major, sys.version_info.minor, sys.version_info.micro, ) # initiate file discovery step within Bandit Manager b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths) if not b_mgr.b_ts.tests: LOG.error(""No tests would be run, please check the profile."") sys.exit(2) # initiate execution of tests within Bandit Manager b_mgr.run_tests() LOG.debug(b_mgr.b_ma) LOG.debug(b_mgr.metrics) # trigger output of results by Bandit Manager sev_level = constants.RANKING[args.severity - 1] conf_level = constants.RANKING[args.confidence - 1] b_mgr.output_results( args.context_lines, sev_level, conf_level, args.output_file, args.output_format, args.msg_template, ) if ( b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0 and not args.exit_zero ): sys.exit(1) else: sys.exit(0) ","def main(): """"""Bandit CLI."""""" # bring our logging stuff up as early as possible debug = ( logging.DEBUG if ""-d"" in sys.argv or ""--debug"" in sys.argv else logging.INFO ) _init_logger(debug) extension_mgr = _init_extensions() baseline_formatters = [ f.name for f in filter( lambda x: hasattr(x.plugin, ""_accepts_baseline""), extension_mgr.formatters, ) ] # now do normal startup parser = argparse.ArgumentParser( description=""Bandit - a Python source code security analyzer"", formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( ""targets"", metavar=""targets"", type=str, nargs=""*"", help=""source file(s) or directory(s) to be tested"", ) parser.add_argument( ""-r"", ""--recursive"", dest=""recursive"", action=""store_true"", help=""find and process files in subdirectories"", ) parser.add_argument( ""-a"", ""--aggregate"", dest=""agg_type"", action=""store"", default=""file"", type=str, choices=[""file"", ""vuln""], help=""aggregate output by vulnerability (default) or by filename"", ) parser.add_argument( ""-n"", ""--number"", dest=""context_lines"", action=""store"", default=3, type=int, help=""maximum number of code lines to output for each issue"", ) parser.add_argument( ""-c"", ""--configfile"", dest=""config_file"", action=""store"", default=None, type=str, help=""optional config file to use for selecting plugins and "" ""overriding defaults"", ) parser.add_argument( ""-p"", ""--profile"", dest=""profile"", action=""store"", default=None, type=str, help=""profile to use (defaults 
to executing all tests)"", ) parser.add_argument( ""-t"", ""--tests"", dest=""tests"", action=""store"", default=None, type=str, help=""comma-separated list of test IDs to run"", ) parser.add_argument( ""-s"", ""--skip"", dest=""skips"", action=""store"", default=None, type=str, help=""comma-separated list of test IDs to skip"", ) severity_group = parser.add_mutually_exclusive_group(required=False) severity_group.add_argument( ""-l"", ""--level"", dest=""severity"", action=""count"", default=1, help=""report only issues of a given severity level or "" ""higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)"", ) severity_group.add_argument( ""--severity-level"", dest=""severity_string"", action=""store"", help=""report only issues of a given severity level or higher."" ' ""all"" and ""low"" are likely to produce the same results, but it' "" is possible for rules to be undefined which will"" ' not be listed in ""low"".', choices=[""all"", ""low"", ""medium"", ""high""], ) confidence_group = parser.add_mutually_exclusive_group(required=False) confidence_group.add_argument( ""-i"", ""--confidence"", dest=""confidence"", action=""count"", default=1, help=""report only issues of a given confidence level or "" ""higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)"", ) confidence_group.add_argument( ""--confidence-level"", dest=""confidence_string"", action=""store"", help=""report only issues of a given confidence level or higher."" ' ""all"" and ""low"" are likely to produce the same results, but it' "" is possible for rules to be undefined which will"" ' not be listed in ""low"".', choices=[""all"", ""low"", ""medium"", ""high""], ) output_format = ( ""screen"" if ( sys.stdout.isatty() and os.getenv(""NO_COLOR"") is None and os.getenv(""TERM"") != ""dumb"" ) else ""txt"" ) parser.add_argument( ""-f"", ""--format"", dest=""output_format"", action=""store"", default=output_format, help=""specify output format"", choices=sorted(extension_mgr.formatter_names), ) parser.add_argument( ""--msg-template"", action=""store"", default=None, help=""specify output message template"" "" (only usable with --format custom),"" "" see CUSTOM FORMAT section"" "" for list of available values"", ) parser.add_argument( ""-o"", ""--output"", dest=""output_file"", action=""store"", nargs=""?"", type=argparse.FileType(""w"", encoding=""utf-8""), default=sys.stdout, help=""write report to filename"", ) group = parser.add_mutually_exclusive_group(required=False) group.add_argument( ""-v"", ""--verbose"", dest=""verbose"", action=""store_true"", help=""output extra information like excluded and included files"", ) parser.add_argument( ""-d"", ""--debug"", dest=""debug"", action=""store_true"", help=""turn on debug mode"", ) group.add_argument( ""-q"", ""--quiet"", ""--silent"", dest=""quiet"", action=""store_true"", help=""only show output in the case of an error"", ) parser.add_argument( ""--ignore-nosec"", dest=""ignore_nosec"", action=""store_true"", help=""do not skip lines with # nosec comments"", ) parser.add_argument( ""-x"", ""--exclude"", dest=""excluded_paths"", action=""store"", default="","".join(constants.EXCLUDE), help=""comma-separated list of paths (glob patterns "" ""supported) to exclude from scan "" ""(note that these are in addition to the excluded "" ""paths provided in the config file) (default: "" + "","".join(constants.EXCLUDE) + "")"", ) parser.add_argument( ""-b"", ""--baseline"", dest=""baseline"", action=""store"", default=None, help=""path of a baseline report to compare against "" ""(only 
JSON-formatted files are accepted)"", ) parser.add_argument( ""--ini"", dest=""ini_path"", action=""store"", default=None, help=""path to a .bandit file that supplies command line arguments"", ) parser.add_argument( ""--exit-zero"", action=""store_true"", dest=""exit_zero"", default=False, help=""exit with 0, "" ""even with results found"", ) python_ver = sys.version.replace(""\n"", """") parser.add_argument( ""--version"", action=""version"", version=""%(prog)s {version}\n python version = {python}"".format( version=bandit.__version__, python=python_ver ), ) parser.set_defaults(debug=False) parser.set_defaults(verbose=False) parser.set_defaults(quiet=False) parser.set_defaults(ignore_nosec=False) plugin_info = [ f""{a[0]}\t{a[1].name}"" for a in extension_mgr.plugins_by_id.items() ] blacklist_info = [] for a in extension_mgr.blacklist.items(): for b in a[1]: blacklist_info.append(""{}\t{}"".format(b[""id""], b[""name""])) plugin_list = ""\n\t"".join(sorted(set(plugin_info + blacklist_info))) dedent_text = textwrap.dedent( """""" CUSTOM FORMATTING ----------------- Available tags: {abspath}, {relpath}, {line}, {col}, {test_id}, {severity}, {msg}, {confidence}, {range} Example usage: Default template: bandit -r examples/ --format custom --msg-template \\ ""{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"" Provides same output as: bandit -r examples/ --format custom Tags can also be formatted in python string.format() style: bandit -r examples/ --format custom --msg-template \\ ""{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"" See python documentation for more information about formatting style: https://docs.python.org/3/library/string.html The following tests were discovered and loaded: ----------------------------------------------- """""" ) parser.epilog = dedent_text + f""\t{plugin_list}"" # setup work - parse arguments, and initialize BanditManager args = parser.parse_args() # Check if `--msg-template` is not present without custom formatter if args.output_format != ""custom"" and args.msg_template is not None: parser.error(""--msg-template can only be used with --format=custom"") # Check if confidence or severity level have been specified with strings if args.severity_string is not None: if args.severity_string == ""all"": args.severity = 1 elif args.severity_string == ""low"": args.severity = 2 elif args.severity_string == ""medium"": args.severity = 3 elif args.severity_string == ""high"": args.severity = 4 # Other strings will be blocked by argparse if args.confidence_string is not None: if args.confidence_string == ""all"": args.confidence = 1 elif args.confidence_string == ""low"": args.confidence = 2 elif args.confidence_string == ""medium"": args.confidence = 3 elif args.confidence_string == ""high"": args.confidence = 4 # Other strings will be blocked by argparse try: b_conf = b_config.BanditConfig(config_file=args.config_file) except utils.ConfigError as e: LOG.error(e) sys.exit(2) # Handle .bandit files in projects to pass cmdline args from file ini_options = _get_options_from_ini(args.ini_path, args.targets) if ini_options: # prefer command line, then ini file args.excluded_paths = _log_option_source( parser.get_default(""excluded_paths""), args.excluded_paths, ini_options.get(""exclude""), ""excluded paths"", ) args.skips = _log_option_source( parser.get_default(""skips""), args.skips, ini_options.get(""skips""), ""skipped tests"", ) args.tests = _log_option_source( parser.get_default(""tests""), args.tests, ini_options.get(""tests""), ""selected 
tests"", ) ini_targets = ini_options.get(""targets"") if ini_targets: ini_targets = ini_targets.split("","") args.targets = _log_option_source( parser.get_default(""targets""), args.targets, ini_targets, ""selected targets"", ) # TODO(tmcpeak): any other useful options to pass from .bandit? args.recursive = _log_option_source( parser.get_default(""recursive""), args.recursive, ini_options.get(""recursive""), ""recursive scan"", ) args.agg_type = _log_option_source( parser.get_default(""agg_type""), args.agg_type, ini_options.get(""aggregate""), ""aggregate output type"", ) args.context_lines = _log_option_source( parser.get_default(""context_lines""), args.context_lines, int(ini_options.get(""number"") or 0) or None, ""max code lines output for issue"", ) args.profile = _log_option_source( parser.get_default(""profile""), args.profile, ini_options.get(""profile""), ""profile"", ) args.severity = _log_option_source( parser.get_default(""severity""), args.severity, ini_options.get(""level""), ""severity level"", ) args.confidence = _log_option_source( parser.get_default(""confidence""), args.confidence, ini_options.get(""confidence""), ""confidence level"", ) args.output_format = _log_option_source( parser.get_default(""output_format""), args.output_format, ini_options.get(""format""), ""output format"", ) args.msg_template = _log_option_source( parser.get_default(""msg_template""), args.msg_template, ini_options.get(""msg-template""), ""output message template"", ) args.output_file = _log_option_source( parser.get_default(""output_file""), args.output_file, ini_options.get(""output""), ""output file"", ) args.verbose = _log_option_source( parser.get_default(""verbose""), args.verbose, ini_options.get(""verbose""), ""output extra information"", ) args.debug = _log_option_source( parser.get_default(""debug""), args.debug, ini_options.get(""debug""), ""debug mode"", ) args.quiet = _log_option_source( parser.get_default(""quiet""), args.quiet, ini_options.get(""quiet""), ""silent mode"", ) args.ignore_nosec = _log_option_source( parser.get_default(""ignore_nosec""), args.ignore_nosec, ini_options.get(""ignore-nosec""), ""do not skip lines with # nosec"", ) args.baseline = _log_option_source( parser.get_default(""baseline""), args.baseline, ini_options.get(""baseline""), ""path of a baseline report"", ) if not args.targets: parser.print_usage() sys.exit(2) # if the log format string was set in the options, reinitialize if b_conf.get_option(""log_format""): log_format = b_conf.get_option(""log_format"") _init_logger(log_level=logging.DEBUG, log_format=log_format) if args.quiet: _init_logger(log_level=logging.WARN) try: profile = _get_profile(b_conf, args.profile, args.config_file) _log_info(args, profile) profile[""include""].update(args.tests.split("","") if args.tests else []) profile[""exclude""].update(args.skips.split("","") if args.skips else []) extension_mgr.validate_profile(profile) except (utils.ProfileNotFound, ValueError) as e: LOG.error(e) sys.exit(2) b_mgr = b_manager.BanditManager( b_conf, args.agg_type, args.debug, profile=profile, verbose=args.verbose, quiet=args.quiet, ignore_nosec=args.ignore_nosec, ) if args.baseline is not None: try: with open(args.baseline) as bl: data = bl.read() b_mgr.populate_baseline(data) except OSError: LOG.warning(""Could not open baseline report: %s"", args.baseline) sys.exit(2) if args.output_format not in baseline_formatters: LOG.warning( ""Baseline must be used with one of the following "" ""formats: "" + str(baseline_formatters) ) sys.exit(2) if 
args.output_format != ""json"": if args.config_file: LOG.info(""using config: %s"", args.config_file) LOG.info( ""running on Python %d.%d.%d"", sys.version_info.major, sys.version_info.minor, sys.version_info.micro, ) # initiate file discovery step within Bandit Manager b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths) if not b_mgr.b_ts.tests: LOG.error(""No tests would be run, please check the profile."") sys.exit(2) # initiate execution of tests within Bandit Manager b_mgr.run_tests() LOG.debug(b_mgr.b_ma) LOG.debug(b_mgr.metrics) # trigger output of results by Bandit Manager sev_level = constants.RANKING[args.severity - 1] conf_level = constants.RANKING[args.confidence - 1] b_mgr.output_results( args.context_lines, sev_level, conf_level, args.output_file, args.output_format, args.msg_template, ) if ( b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0 and not args.exit_zero ): sys.exit(1) else: sys.exit(0) " 58599,"def _max_len(value: Any, max_len: int = 20, add_addr: bool = False) -> str: """"""Abbreviate a string representation of an object to `max_len` characters. Args: value: Object to be represented as a string. max_len: Maximum return string length. add_addr: If True, will add part of the object address to the end of the string, e.g. to identify different instances of the same class. If False or if the value is an int, float, or bool, three dots (``...``) will be used instead. """""" string = str(value) if len(string) <= max_len: return string if add_addr and not isinstance(value, (int, float, bool)): result = string[: (max_len - 5)] result += ""_"" + hex(id(value))[-4:] return result result = string[: (max_len - 3)] result += ""..."" return result ","def _max_len(value: Any, max_len: int = 20, add_addr: bool = False) -> str: """"""Abbreviate a string representation of an object to `max_len` characters. Args: value: Object to be represented as a string. max_len: Maximum return string length. add_addr: If True, will add part of the object address to the end of the string, e.g. to identify different instances of the same class. If False or if the value is an int, float, or bool, three dots (``...``) will be used instead. """""" string = str(value) if len(string) <= max_len: return string if add_addr and not isinstance(value, (int, float, bool)): result = f""{string[: (max_len - 5)]}_{hex(id(value))[-4:]}"" return result result = f""{string[: (max_len - 3)]}..."" return result " 57169,"def recursive_items(dictionary: dict) -> Iterator[tuple]: """"""Yields an iterator containing tuples of key, value pairs Yields: tuple. Yields tuples of key, value pairs. """""" for key, value in dictionary.items(): if isinstance(value, dict): yield from recursive_items(value) else: yield (key, value) ","def recursive_items(dictionary: dict) -> Iterator[tuple]: """"""Yields an iterator containing tuples of key, value pairs Yields: tuple. Yields tuples of key, value pairs. """""" for key, value in dictionary.items(): if isinstance(value, dict): yield from recursive_items(value) else: yield (key, value) " 50089,"def brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[], args={}, sec_cutoff=0.1, options=None): """""" Solves for the dynamics of a system using the Bloch-Redfield master equation, given an input Hamiltonian, Hermitian bath-coupling terms and their associated spectral functions, as well as possible Lindblad collapse operators. 
Parameters ---------- H : :class:`Qobj`, :class:`QobjEvo` Possibly time-dependent system Liouvillian or Hamiltonian as a Qobj or QobjEvo. list of [:class:`Qobj`, :class:`Coefficient`] or callable that can be made into :class:`QobjEvo` are also accepted. psi0: Qobj Initial density matrix or state vector (ket). tlist : array_like List of times for evaluating evolution a_ops : list of (a_op, spectra) Nested list of system operators that couple to the environment, and the corresponding bath spectra. a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo` The operator coupling to the environment. Must be hermitian. spectra : :class:`Coefficient`, str, func The corresponding bath spectral responce. Can be a `Coefficient` using an 'w' args, a function of the frequence or a string. Coefficient build from a numpy array are understood as a function of ``w`` instead of ``t``. Function are expected to be of the signature ``f(w)`` or ``f(t, w, **args)``. The spectra function can depend on ``t`` if the corresponding ``a_op`` is a :class:`QobjEvo`. Example: .. code-block:: a_ops = [ (a+a.dag(), ('w>0', args={""w"": 0})), (QobjEvo(a+a.dag()), 'w > exp(-t)'), (QobjEvo([b+b.dag(), lambda t: ...]), lambda w: ...)), (c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))), ] .. note: ``Cubic_Spline`` have been replaced by :class:`Coefficient`\: ``spline = qutip.coefficient(array, tlist=times)`` Whether the ``a_ops`` is time dependent is deceided by the type of the operator: :class:`Qobj` vs :class:`QobjEvo` instead of the type of the spectra. e_ops : list of :class:`Qobj` / callback function Single operator or list of operators for which to evaluate expectation values or callable or list of callable. Callable signature must be, `f(t: float, state: Qobj)`. See :func:`expect` for more detail of operator expectation c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) List of collapse operators. args : dict Dictionary of parameters for time-dependent Hamiltonians and collapse operators. The key ``w`` is reserved for the spectra function. sec_cutoff : float {0.1} Cutoff for secular approximation. Use ``-1`` if secular approximation is not used when evaluating bath-coupling terms. options : :class:`qutip.solver.SolverOptions` Options for the solver. Returns ------- result: :class:`qutip.solver.Result` An instance of the class :class:`qutip.solver.Result`, which contains either an array of expectation values, for operators given in e_ops, or a list of states for the times specified by `tlist`. .. note: The option ``operator_data_type`` is used to determine in which format the bloch redfield tensor is computed. Use 'csr' for sparse and 'dense' for dense array. With 'data', it will try to use the same data type as the ``a_ops``, but it is usually less efficient than manually choosing it. 
"""""" H = QobjEvo(H, args=args, tlist=tlist) c_ops = c_ops if c_ops is not None else [] if not isinstance(c_ops, (list, tuple)): c_ops = [c_ops] c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops] new_a_ops = [] for (a_op, spectra) in a_ops: aop = QobjEvo(a_op, args=args, tlist=tlist) if isinstance(spectra, str): new_a_ops.append( (aop, coefficient(spectra, args={**args, 'w':0}))) elif isinstance(spectra, InterCoefficient): new_a_ops.append((aop, SpectraCoefficient(spectra))) elif isinstance(spectra, Coefficient): new_a_ops.append((aop, spectra)) elif callable(spectra): sig = inspect.signature(spectra) if tuple(sig.parameters.keys()) == (""w"",): spec = SpectraCoefficient(coefficient(spectra)) else: spec = coefficient(spectra, args={**args, 'w':0}) new_a_ops.append((aop, spec)) else: raise TypeError(""a_ops's spectra not known"") solver = BRSolver( H, new_a_ops, c_ops, options=options, sec_cutoff=sec_cutoff, ) return solver.run(psi0, tlist, e_ops=e_ops) ","def brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[], args={}, sec_cutoff=0.1, options=None): """""" Solves for the dynamics of a system using the Bloch-Redfield master equation, given an input Hamiltonian, Hermitian bath-coupling terms and their associated spectral functions, as well as possible Lindblad collapse operators. Parameters ---------- H : :class:`Qobj`, :class:`QobjEvo` Possibly time-dependent system Liouvillian or Hamiltonian as a Qobj or QobjEvo. list of [:class:`Qobj`, :class:`Coefficient`] or callable that can be made into :class:`QobjEvo` are also accepted. psi0: Qobj Initial density matrix or state vector (ket). tlist : array_like List of times for evaluating evolution a_ops : list of (a_op, spectra) Nested list of system operators that couple to the environment, and the corresponding bath spectra. a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo` The operator coupling to the environment. Must be hermitian. spectra : :class:`Coefficient`, str, func The corresponding bath spectral responce. Can be a `Coefficient` using an 'w' args, a function of the frequence or a string. Coefficient build from a numpy array are understood as a function of ``w`` instead of ``t``. Function are expected to be of the signature ``f(w)`` or ``f(t, w, **args)``. The spectra function can depend on ``t`` if the corresponding ``a_op`` is a :class:`QobjEvo`. Example: .. code-block:: a_ops = [ (a+a.dag(), ('w>0', args={""w"": 0})), (QobjEvo(a+a.dag()), 'w > exp(-t)'), (QobjEvo([b+b.dag(), lambda t: ...]), lambda w: ...)), (c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))), ] .. note: ``Cubic_Spline`` has been replaced by :class:`Coefficient`\: ``spline = qutip.coefficient(array, tlist=times)`` Whether the ``a_ops`` is time dependent is deceided by the type of the operator: :class:`Qobj` vs :class:`QobjEvo` instead of the type of the spectra. e_ops : list of :class:`Qobj` / callback function Single operator or list of operators for which to evaluate expectation values or callable or list of callable. Callable signature must be, `f(t: float, state: Qobj)`. See :func:`expect` for more detail of operator expectation c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) List of collapse operators. args : dict Dictionary of parameters for time-dependent Hamiltonians and collapse operators. The key ``w`` is reserved for the spectra function. sec_cutoff : float {0.1} Cutoff for secular approximation. Use ``-1`` if secular approximation is not used when evaluating bath-coupling terms. 
options : :class:`qutip.solver.SolverOptions` Options for the solver. Returns ------- result: :class:`qutip.solver.Result` An instance of the class :class:`qutip.solver.Result`, which contains either an array of expectation values, for operators given in e_ops, or a list of states for the times specified by `tlist`. .. note: The option ``operator_data_type`` is used to determine in which format the bloch redfield tensor is computed. Use 'csr' for sparse and 'dense' for dense array. With 'data', it will try to use the same data type as the ``a_ops``, but it is usually less efficient than manually choosing it. """""" H = QobjEvo(H, args=args, tlist=tlist) c_ops = c_ops if c_ops is not None else [] if not isinstance(c_ops, (list, tuple)): c_ops = [c_ops] c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops] new_a_ops = [] for (a_op, spectra) in a_ops: aop = QobjEvo(a_op, args=args, tlist=tlist) if isinstance(spectra, str): new_a_ops.append( (aop, coefficient(spectra, args={**args, 'w':0}))) elif isinstance(spectra, InterCoefficient): new_a_ops.append((aop, SpectraCoefficient(spectra))) elif isinstance(spectra, Coefficient): new_a_ops.append((aop, spectra)) elif callable(spectra): sig = inspect.signature(spectra) if tuple(sig.parameters.keys()) == (""w"",): spec = SpectraCoefficient(coefficient(spectra)) else: spec = coefficient(spectra, args={**args, 'w':0}) new_a_ops.append((aop, spec)) else: raise TypeError(""a_ops's spectra not known"") solver = BRSolver( H, new_a_ops, c_ops, options=options, sec_cutoff=sec_cutoff, ) return solver.run(psi0, tlist, e_ops=e_ops) " 34084,"def _remove_protocol_from_url(url: str) -> str: """""" Helper function to remove protocol from URL if it exists. """""" parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme: # Construct URL without protocol scheme = ""%s://"" % parsed_url.scheme return parsed_url.geturl().replace(scheme, """", 1) return url ","def _remove_protocol_from_url(url: str) -> str: """""" Helper function to remove protocol from URL if it exists. 
"""""" parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme: # Construct URL without protocol scheme = f""{parsed_url.scheme}://"" return parsed_url.geturl().replace(scheme, """", 1) return url " 48978,"def test_define_bucket_website_configuration(test_bucket): bucket = storage_define_bucket_website_configuration.\ define_bucket_website_configuration(test_bucket.name, ""index.html"", ""404.html"") website_val = { ""mainPageSuffix"": ""index.html"", ""notFoundPage"": ""404.html"" } assert bucket._properties[""website""] == website_val ","def test_define_bucket_website_configuration(test_bucket): bucket = storage_define_bucket_website_configuration.define_bucket_website_configuration( test_bucket.name, ""index.html"", ""404.html"") website_val = { ""mainPageSuffix"": ""index.html"", ""notFoundPage"": ""404.html"" } assert bucket._properties[""website""] == website_val " 422,"def iter_form_ids_by_xmlns(domain, xmlns): if xmlns: key = ['submission xmlns', domain, xmlns] else: key = ['submission', domain] endkey = key + [datetime.utcnow().isoformat()] # pull the first 1000 documents sorted by submission time LIMIT = 1000 results = XFormInstance.get_db().view( 'all_forms/view', startkey=key, endkey=endkey, reduce=False, include_docs=False, limit=LIMIT, descending=False, ).all() while results: for result in results: yield result['id'] # add the last document's submitted_on time to the startkey last_result = results[-1] startkey = key + [last_result['key'][-1]] # pull 1000 documents starting with the last document pulled in the previous iteration results = XFormInstance.get_db().view( 'all_forms/view', startkey=startkey, endkey=endkey, reduce=False, include_docs=False, limit=LIMIT, descending=False, ).all() # remove the first document in this new iteration so that we do not process it twice results.pop(0) ","def iter_form_ids_by_xmlns(domain, xmlns): if xmlns: key = ['submission xmlns', domain, xmlns] else: key = ['submission', domain] endkey = key + [datetime.utcnow().isoformat()] # pull the first 1000 documents sorted by submission time LIMIT = 1000 results = XFormInstance.get_db().view( 'all_forms/view', startkey=key, endkey=endkey, reduce=False, include_docs=False, limit=LIMIT, descending=False, ).all() while results: for result in results: yield result['id'] # add the last document's received_on time to the startkey last_result = results[-1] startkey = key + [last_result['key'][-1]] # pull 1000 documents starting with the last document pulled in the previous iteration results = XFormInstance.get_db().view( 'all_forms/view', startkey=startkey, endkey=endkey, reduce=False, include_docs=False, limit=LIMIT, descending=False, ).all() # remove the first document in this new iteration so that we do not process it twice results.pop(0) " 28091,"def journal_entry(cmdr, is_beta, system, station, entry, state): # Always update, even if we're not the *current* system or station provider. this.system_address = entry.get('SystemAddress') or this.system_address this.system = entry.get('StarSystem') or this.system # We need pop == 0 to set the value so as to clear 'x' in systems with # no stations. 
pop = entry.get('Population') if pop is not None: this.system_population = pop this.station = entry.get('StationName') or this.station this.station_marketid = entry.get('MarketID') or this.station_marketid # We might pick up StationName in DockingRequested, make sure we clear it if leaving if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'): this.station = None this.station_marketid = None if config.get('station_provider') == 'EDSM': this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '') this.station_link['url'] = station_url(this.system, this.station) this.station_link.update_idletasks() # Update display of 'EDSM Status' image if this.system_link['text'] != system: this.system_link['text'] = system or '' this.system_link['image'] = '' this.system_link.update_idletasks() this.multicrew = bool(state['Role']) if 'StarPos' in entry: this.coordinates = entry['StarPos'] elif entry['event'] == 'LoadGame': this.coordinates = None if entry['event'] in ['LoadGame', 'Commander', 'NewCommander']: this.newgame = True this.newgame_docked = False this.navbeaconscan = 0 elif entry['event'] == 'StartUp': this.newgame = False this.newgame_docked = False this.navbeaconscan = 0 elif entry['event'] == 'Location': this.newgame = True this.newgame_docked = entry.get('Docked', False) this.navbeaconscan = 0 elif entry['event'] == 'NavBeaconScan': this.navbeaconscan = entry['NumBodies'] # Send interesting events to EDSM if config.getint('edsm_out') and not is_beta and not this.multicrew and credentials(cmdr) and entry['event'] not in this.discardedEvents: # Introduce transient states into the event transient = { '_systemName': system, '_systemCoordinates': this.coordinates, '_stationName': station, '_shipId': state['ShipID'], } entry.update(transient) if entry['event'] == 'LoadGame': # Synthesise Materials events on LoadGame since we will have missed it materials = { 'timestamp': entry['timestamp'], 'event': 'Materials', 'Raw': [ { 'Name': k, 'Count': v } for k,v in state['Raw'].items() ], 'Manufactured': [ { 'Name': k, 'Count': v } for k,v in state['Manufactured'].items() ], 'Encoded': [ { 'Name': k, 'Count': v } for k,v in state['Encoded'].items() ], } materials.update(transient) this.queue.put((cmdr, materials)) this.queue.put((cmdr, entry)) ","def journal_entry(cmdr, is_beta, system, station, entry, state): # Always update, even if we're not the *current* system or station provider. this.system_address = entry.get('SystemAddress') or this.system_address this.system = entry.get('StarSystem') or this.system # We need pop == 0 to set the value so as to clear 'x' in systems with # no stations. 
pop = entry.get('Population') if pop is not None: this.system_population = pop this.station = entry.get('StationName') or this.station this.station_marketid = entry.get('MarketID') or this.station_marketid this.station = entry.get('StationName', this.station) if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'): this.station = None this.station_marketid = None if config.get('station_provider') == 'EDSM': this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '') this.station_link['url'] = station_url(this.system, this.station) this.station_link.update_idletasks() # Update display of 'EDSM Status' image if this.system_link['text'] != system: this.system_link['text'] = system or '' this.system_link['image'] = '' this.system_link.update_idletasks() this.multicrew = bool(state['Role']) if 'StarPos' in entry: this.coordinates = entry['StarPos'] elif entry['event'] == 'LoadGame': this.coordinates = None if entry['event'] in ['LoadGame', 'Commander', 'NewCommander']: this.newgame = True this.newgame_docked = False this.navbeaconscan = 0 elif entry['event'] == 'StartUp': this.newgame = False this.newgame_docked = False this.navbeaconscan = 0 elif entry['event'] == 'Location': this.newgame = True this.newgame_docked = entry.get('Docked', False) this.navbeaconscan = 0 elif entry['event'] == 'NavBeaconScan': this.navbeaconscan = entry['NumBodies'] # Send interesting events to EDSM if config.getint('edsm_out') and not is_beta and not this.multicrew and credentials(cmdr) and entry['event'] not in this.discardedEvents: # Introduce transient states into the event transient = { '_systemName': system, '_systemCoordinates': this.coordinates, '_stationName': station, '_shipId': state['ShipID'], } entry.update(transient) if entry['event'] == 'LoadGame': # Synthesise Materials events on LoadGame since we will have missed it materials = { 'timestamp': entry['timestamp'], 'event': 'Materials', 'Raw': [ { 'Name': k, 'Count': v } for k,v in state['Raw'].items() ], 'Manufactured': [ { 'Name': k, 'Count': v } for k,v in state['Manufactured'].items() ], 'Encoded': [ { 'Name': k, 'Count': v } for k,v in state['Encoded'].items() ], } materials.update(transient) this.queue.put((cmdr, materials)) this.queue.put((cmdr, entry)) " 26834,"def check_all_providers() -> List[str]: errors: List[str] = [] for extra, providers in EXTRAS_PROVIDERS_PACKAGES.items(): for provider in providers: provider_directory = get_provider_directory(provider) if not os.path.isdir(provider_directory): errors.append( f""The {extra} has provider {provider} that has"" f"" missing {provider_directory} directory"" ) continue if not os.path.exists(os.path.join(provider_directory, ""__init__.py"")): errors.append( f""The {extra} has provider {provider} that has"" f"" missing __init__.py in the {provider_directory} directory"" ) if not os.path.exists(os.path.join(provider_directory, ""README.md"")): errors.append( f""The {extra} has provider {provider} that has"" f"" missing README.md in the {provider_directory} directory"" ) return errors ","def check_all_providers() -> List[str]: errors: List[str] = [] for extra, providers in EXTRAS_PROVIDERS_PACKAGES.items(): for provider in providers: provider_directory = get_provider_directory(provider) if not os.path.isdir(provider_directory): errors.append( f""The {extra} has provider {provider} that has missing {provider_directory} directory"" ) continue if not os.path.exists(os.path.join(provider_directory, ""__init__.py"")): 
errors.append( f""The {extra} has provider {provider} that has"" f"" missing __init__.py in the {provider_directory} directory"" ) if not os.path.exists(os.path.join(provider_directory, ""README.md"")): errors.append( f""The {extra} has provider {provider} that has"" f"" missing README.md in the {provider_directory} directory"" ) return errors " 55574,"def _case_sensitive_fs(path): """""" True when filesystem at `path` is case sensitive, False otherwise """""" root = op.join(path, uuid4().hex) fnames = [root + suffix for suffix in 'aA'] try: for fname in fnames: with open(fname, 'wt') as fobj: fobj.write('text') written = glob(root + '*') finally: for fname in written: os.unlink(fname) return len(written) == 2 ","def _case_sensitive_fs(path): """"""True when filesystem at `path` is case sensitive, False otherwise. Checks this by attempting to write two files, one w/ upper case, one with lower. If after this only one file exists, the system is case-insensitive. """""" root = op.join(path, uuid4().hex) fnames = [root + suffix for suffix in 'aA'] try: for fname in fnames: with open(fname, 'wt') as fobj: fobj.write('text') written = glob(root + '*') finally: for fname in written: os.unlink(fname) return len(written) == 2 " 7169,"def entropy(image, selem, out=None, mask=None, shift_x=False, shift_y=False): """"""Local entropy. The entropy is computed using base 2 logarithm i.e. the filter returns the minimum number of bits needed to encode the local greylevel distribution. Parameters ---------- image : 2-D array (uint8, uint16) Input image. selem : 2-D array The neighborhood expressed as a 2-D array of 1's and 0's. out : 2-D array (same dtype as input) If None, a new array is allocated. mask : ndarray Mask array that defines (>0) area of the image included in the local neighborhood. If None, the complete image is used (default). shift_x, shift_y : int Offset added to the structuring element center point. Shift is bounded to the structuring element sizes (center must be inside the given structuring element). Returns ------- out : ndarray (double) Output image. References ---------- .. [1] https://en.wikipedia.org/wiki/Entropy_%28information_theory%29 Examples -------- >>> from skimage import data >>> from skimage.filters.rank import entropy >>> from skimage.morphology import disk >>> img = data.camera() >>> ent = entropy(img, disk(5)) """""" return _apply_scalar_per_pixel(generic_cy._entropy, image, selem, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y, out_dtype=np.double) ","def entropy(image, selem, out=None, mask=None, shift_x=False, shift_y=False): """"""Local entropy. The entropy is computed using base 2 logarithm i.e. the filter returns the minimum number of bits needed to encode the local greylevel distribution. Parameters ---------- image : 2-D array (uint8, uint16) Input image. selem : 2-D array The neighborhood expressed as a 2-D array of 1's and 0's. out : 2-D array (same dtype as input) If None, a new array is allocated. mask : ndarray Mask array that defines (>0) area of the image included in the local neighborhood. If None, the complete image is used (default). shift_x, shift_y : int Offset added to the structuring element center point. Shift is bounded to the structuring element sizes (center must be inside the given structuring element). Returns ------- out : ndarray (double) Output image. References ---------- .. 
[1] `https://en.wikipedia.org/wiki/Entropy_(information_theory) `_ Examples -------- >>> from skimage import data >>> from skimage.filters.rank import entropy >>> from skimage.morphology import disk >>> img = data.camera() >>> ent = entropy(img, disk(5)) """""" return _apply_scalar_per_pixel(generic_cy._entropy, image, selem, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y, out_dtype=np.double) " 58052,"def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str: """""" This command pushes local changes to the remote system. Args: client: XSOAR Client to use. args: args['data']: the data to send to the remote system args['entries']: the entries to send to the remote system args['incident_changed']: boolean telling us if the local incident indeed changed or not args['remote_incident_id']: the remote incident id params: entry_tags: the tags to pass to the entries (to separate between comments and work_notes) Returns: The remote incident id - ticket_id """""" parsed_args = UpdateRemoteSystemArgs(args) if parsed_args.delta: demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}') # ticket_type = client.ticket_type ticket_id = parsed_args.remote_incident_id if parsed_args.incident_changed: demisto.debug(f'Incident changed: {parsed_args.incident_changed}') # Close ticket if needed if parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'): # Set status TOPdesk ticket to Closed demisto.debug('Close TOPdesk ticket') # Close with API call or set field and let mirroring handle it. # client.update_incident # TODO: Something with updated delta keys or not? # 'processingStatus', 'priority', 'urgency', 'impact' update_args = { 'id': ticket_id } for key in parsed_args.delta: update_args[key] = parsed_args.delta[key] demisto.debug(f'SZU update_args=[{update_args}]') client.update_incident(update_args) entries = parsed_args.entries if entries: demisto.debug(f'New entries {entries}') for entry in entries: demisto.debug(f'Sending entry {entry.get(""id"")}, type: {entry.get(""type"")}') # Mirroring files as entries if entry.get('type') == 3: path_res = demisto.getFilePath(entry.get('id')) full_file_name = path_res.get('name') file_name, file_extension = os.path.splitext(full_file_name) if not file_extension: file_extension = '' client.attachment_upload(incident_id=ticket_id, incident_number=None, file_entry=entry.get('id'), file_name=file_name + '_mirrored_from_xsoar' + file_extension, invisible_for_caller=False, file_description=f""Upload from xsoar: {file_name}.{file_extension}"") else: # Mirroring comment and work notes as entries xargs = { 'id': '', 'action': '', 'action_invisible_for_caller': False, } tags = entry.get('tags', []) if params.get('work_notes_tag') in tags: xargs['action_invisible_for_caller'] = True elif params.get('comments_tag') in tags: xargs['action_invisible_for_caller'] = False # Sometimes user is an empty str, not None, therefore nothing is displayed user = entry.get('user', 'dbot') if (user): duser = demisto.findUser(username=user) name = duser['name'] else: name = 'Xsoar dbot' text = f""Update from {name}:

{str(entry.get('contents', ''))}"" \ + ""

Mirrored from Cortex XSOAR"" # client.add_comment(ticket_id, ticket_type, key, text) xargs['id'] = ticket_id xargs['action'] = text client.update_incident(xargs) return ticket_id ","def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str: """""" This command pushes local changes to the remote system. Args: client: XSOAR Client to use. args: args['data']: the data to send to the remote system args['entries']: the entries to send to the remote system args['incident_changed']: boolean telling us if the local incident indeed changed or not args['remote_incident_id']: the remote incident id params: entry_tags: the tags to pass to the entries (to separate between comments and work_notes) Returns: The remote incident id - ticket_id """""" parsed_args = UpdateRemoteSystemArgs(args) if parsed_args.delta: demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}') # ticket_type = client.ticket_type ticket_id = parsed_args.remote_incident_id if parsed_args.incident_changed: demisto.debug(f'Incident changed: {parsed_args.incident_changed}') # Close ticket if needed if parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'): # Set status TOPdesk ticket to Closed demisto.debug('Close TOPdesk ticket') # Close with API call or set field and let mirroring handle it. # client.update_incident # TODO: Something with updated delta keys or not? # 'processingStatus', 'priority', 'urgency', 'impact' update_args = { 'id': ticket_id } for key in parsed_args.delta: if key in TOPDESK_ARGS: update_args[key] = parsed_args.delta[key] demisto.debug(f'SZU update_args=[{update_args}]') client.update_incident(update_args) entries = parsed_args.entries if entries: demisto.debug(f'New entries {entries}') for entry in entries: demisto.debug(f'Sending entry {entry.get(""id"")}, type: {entry.get(""type"")}') # Mirroring files as entries if entry.get('type') == 3: path_res = demisto.getFilePath(entry.get('id')) full_file_name = path_res.get('name') file_name, file_extension = os.path.splitext(full_file_name) if not file_extension: file_extension = '' client.attachment_upload(incident_id=ticket_id, incident_number=None, file_entry=entry.get('id'), file_name=file_name + '_mirrored_from_xsoar' + file_extension, invisible_for_caller=False, file_description=f""Upload from xsoar: {file_name}.{file_extension}"") else: # Mirroring comment and work notes as entries xargs = { 'id': '', 'action': '', 'action_invisible_for_caller': False, } tags = entry.get('tags', []) if params.get('work_notes_tag') in tags: xargs['action_invisible_for_caller'] = True elif params.get('comments_tag') in tags: xargs['action_invisible_for_caller'] = False # Sometimes user is an empty str, not None, therefore nothing is displayed user = entry.get('user', 'dbot') if (user): duser = demisto.findUser(username=user) name = duser['name'] else: name = 'Xsoar dbot' text = f""Update from {name}:

{str(entry.get('contents', ''))}"" \ + ""

Mirrored from Cortex XSOAR"" # client.add_comment(ticket_id, ticket_type, key, text) xargs['id'] = ticket_id xargs['action'] = text client.update_incident(xargs) return ticket_id " 56513,"def _get_version() -> str: from pathlib import Path import versioningit return versioningit.get_version(project_dir=Path(__file__).parent.parent) ","def _get_version() -> str: from pathlib import Path import versioningit qcodes_path = Path(__file__).parent return versioningit.get_version(project_dir=qcodes_path.parent) " 33118,"def with_recursion(func, prog=False): """"""Make function recursive in its 1st arg. Return a version of `func` whose 2nd argument (`k`) specifies the number of times to times apply func on its output. .. caution:: Only the first argument to `func` will change, so, for example, if `func` is `step(x, t, dt)`, it will get fed the same `t` and `dt` at each iteration. Parameters ---------- func : function Run the input function recursively. prog : bool or str Determine the mode of progress bar. Returns ------- fun_k : function A function that returns the sequence generated by recursively run func (Trajectory of model evolution). Stepping of dynamical system Examples -------- >>> def dxdt(x): ... return -x >>> step_1 = with_rk4(dxdt, autonom=True) >>> step_k = with_recursion(step_1) >>> x0 = np.arange(3) >>> x7 = step_k(x0, 7, t0=np.nan, dt=0.1)[-1] >>> x7_true = x0 * np.exp(-0.7) >>> np.allclose(x7, x7_true) True """""" def fun_k(x0, k, *args, **kwargs): xx = np.zeros((k+1,)+x0.shape) xx[0] = x0 rg = range(k) if isinstance(prog, str): rg = progbar(rg, prog) elif prog: rg = progbar(rg, 'Recurs.') for i in rg: xx[i+1] = func(xx[i], *args, **kwargs) return xx return fun_k ","def with_recursion(func, prog=False): """"""Make function recursive in its 1st arg. Return a version of `func` whose 2nd argument (`k`) specifies the number of times to times apply func on its output. .. caution:: Only the first argument to `func` will change, so, for example, if `func` is `step(x, t, dt)`, it will get fed the same `t` and `dt` at each iteration. Parameters ---------- func : function Run the input function recursively. prog : bool or str Determine the mode of progress bar. Returns ------- fun_k : function A function that returns the sequence generated by recursively running `func`, i.e. the trajectory of system's evolution. Stepping of dynamical system Examples -------- >>> def dxdt(x): ... return -x >>> step_1 = with_rk4(dxdt, autonom=True) >>> step_k = with_recursion(step_1) >>> x0 = np.arange(3) >>> x7 = step_k(x0, 7, t0=np.nan, dt=0.1)[-1] >>> x7_true = x0 * np.exp(-0.7) >>> np.allclose(x7, x7_true) True """""" def fun_k(x0, k, *args, **kwargs): xx = np.zeros((k+1,)+x0.shape) xx[0] = x0 rg = range(k) if isinstance(prog, str): rg = progbar(rg, prog) elif prog: rg = progbar(rg, 'Recurs.') for i in rg: xx[i+1] = func(xx[i], *args, **kwargs) return xx return fun_k " 56341,"def cross_chan_correlation( st1, streams, shift_len=0.0, allow_individual_trace_shifts=True, xcorr_func='fftw', concurrency=""concurrent"", cores=1, **kwargs): """""" Calculate cross-channel correlation. Determine the cross-channel correlation between two streams of multichannel seismic data. :type st1: obspy.core.stream.Stream :param st1: Stream one :type streams: list :param streams: Streams to compare to. 
:type shift_len: float :param shift_len: Seconds to shift the streams by (total value for negative and positive direction together) :type allow_individual_trace_shifts: bool :param allow_individual_trace_shifts: Controls whether templates are shifted by shift_len in relation to the picks as a whole, or whether each trace can be shifted individually. Defaults to True. :type xcorr_func: str, callable :param xcorr_func: The method for performing correlations. Accepts either a string or callable. See :func:`eqcorrscan.utils.correlate.register_array_xcorr` for more details :type concurrency: str :param concurrency: Concurrency for xcorr-func. :type cores: int :param cores: Number of threads to parallel over :returns: cross channel correlation, float - normalized by number of channels. locations of maximums :rtype: numpy.ndarray, numpy.ndarray .. Note:: If no matching channels were found then the coherance and index for that stream will be nan. """""" # Cut all channels in stream-list to be the correct length (shorter than # st1 if stack = False by shift_len). allow_individual_trace_shifts =\ allow_individual_trace_shifts and shift_len > 0 n_streams = len(streams) df = st1[0].stats.sampling_rate end_trim = int((shift_len * df) / 2) _streams = [] if end_trim > 0: for stream in streams: _stream = stream.copy() # Do not work on the users data for tr in _stream: tr.data = tr.data[end_trim: -end_trim] if tr.stats.sampling_rate != df: raise NotImplementedError(""Sampling rates differ"") _streams.append(_stream) streams = _streams else: # _prep_data_for_correlation works in place on data. # We need to copy it first. streams = [stream.copy() for stream in streams] # Check which channels are in st1 and match those in the stream_list st1, prep_streams, stream_indexes = _prep_data_for_correlation( stream=st1.copy(), templates=streams, template_names=list(range(len(streams))), force_stream_epoch=False) # Run the correlations multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency) [cccsums, no_chans, _] = multichannel_normxcorr( templates=prep_streams, stream=st1, cores=cores, stack=False, **kwargs) # Find maximas, sum and divide by no_chans if allow_individual_trace_shifts: coherances = cccsums.max(axis=-1).sum(axis=-1) / no_chans else: cccsums = cccsums.sum(axis=1) coherances = cccsums.max(axis=-1) / no_chans # Subtract half length of correlogram and convert positions to seconds positions = (cccsums.argmax(axis=-1) - end_trim) / df # This section re-orders the coherences to correspond to the order of the # input streams _coherances = np.empty(n_streams) if allow_individual_trace_shifts: n_max_traces = max([len(st) for st in streams]) n_shifts_per_stream = positions.shape[1] _positions = np.empty([positions.shape[0], n_max_traces]) else: # _positions = np.empty_like(positions) _positions = np.empty([positions.shape[0], 1]) n_shifts_per_stream = 1 _coherances.fill(np.nan) _positions.fill(np.nan) for coh_ind, stream_ind in enumerate(stream_indexes): _coherances[stream_ind] = coherances[coh_ind] _positions[stream_ind, :n_shifts_per_stream] = positions[coh_ind] if not allow_individual_trace_shifts: # remove empty third axis from array _positions = _positions[:, ] return _coherances, _positions ","def cross_chan_correlation( st1, streams, shift_len=0.0, allow_individual_trace_shifts=True, xcorr_func='fftw', concurrency=""concurrent"", cores=1, **kwargs): """""" Calculate cross-channel correlation. Determine the cross-channel correlation between two streams of multichannel seismic data. 
:type st1: obspy.core.stream.Stream :param st1: Stream one :type streams: list :param streams: Streams to compare to. :type shift_len: float :param shift_len: Seconds to shift the streams by (total value for negative and positive direction together) :type allow_individual_trace_shifts: bool :param allow_individual_trace_shifts: Controls whether templates are shifted by shift_len in relation to the picks as a whole, or whether each trace can be shifted individually. Defaults to True. :type xcorr_func: str, callable :param xcorr_func: The method for performing correlations. Accepts either a string or callable. See :func:`eqcorrscan.utils.correlate.register_array_xcorr` for more details :type concurrency: str :param concurrency: Concurrency for xcorr-func. :type cores: int :param cores: Number of threads to parallel over :returns: cross channel correlation, float - normalized by number of channels. locations of maximums :rtype: numpy.ndarray, numpy.ndarray .. Note:: If no matching channels were found then the coherance and index for that stream will be nan. """""" # Cut all channels in stream-list to be the correct length (shorter than # st1 if stack = False by shift_len). allow_individual_trace_shifts = ( allow_individual_trace_shifts and shift_len > 0) n_streams = len(streams) df = st1[0].stats.sampling_rate end_trim = int((shift_len * df) / 2) _streams = [] if end_trim > 0: for stream in streams: _stream = stream.copy() # Do not work on the users data for tr in _stream: tr.data = tr.data[end_trim: -end_trim] if tr.stats.sampling_rate != df: raise NotImplementedError(""Sampling rates differ"") _streams.append(_stream) streams = _streams else: # _prep_data_for_correlation works in place on data. # We need to copy it first. streams = [stream.copy() for stream in streams] # Check which channels are in st1 and match those in the stream_list st1, prep_streams, stream_indexes = _prep_data_for_correlation( stream=st1.copy(), templates=streams, template_names=list(range(len(streams))), force_stream_epoch=False) # Run the correlations multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency) [cccsums, no_chans, _] = multichannel_normxcorr( templates=prep_streams, stream=st1, cores=cores, stack=False, **kwargs) # Find maximas, sum and divide by no_chans if allow_individual_trace_shifts: coherances = cccsums.max(axis=-1).sum(axis=-1) / no_chans else: cccsums = cccsums.sum(axis=1) coherances = cccsums.max(axis=-1) / no_chans # Subtract half length of correlogram and convert positions to seconds positions = (cccsums.argmax(axis=-1) - end_trim) / df # This section re-orders the coherences to correspond to the order of the # input streams _coherances = np.empty(n_streams) if allow_individual_trace_shifts: n_max_traces = max([len(st) for st in streams]) n_shifts_per_stream = positions.shape[1] _positions = np.empty([positions.shape[0], n_max_traces]) else: # _positions = np.empty_like(positions) _positions = np.empty([positions.shape[0], 1]) n_shifts_per_stream = 1 _coherances.fill(np.nan) _positions.fill(np.nan) for coh_ind, stream_ind in enumerate(stream_indexes): _coherances[stream_ind] = coherances[coh_ind] _positions[stream_ind, :n_shifts_per_stream] = positions[coh_ind] if not allow_individual_trace_shifts: # remove empty third axis from array _positions = _positions[:, ] return _coherances, _positions " 7116,"def blind_richardson_lucy(image, psf=None, iterations=10, return_iterations=False, clip=False): """"""Blind Richardson-Lucy deconvolution. 
Parameters ---------- image : ndarray Input degraded image (can be N dimensional). psf : ndarray, optional A first estimate of the point spread function, same size as image iterations : int Number of iterations. This parameter plays the role of regularisation. After a given iterations, the estimates can produce division by 0 problems, then the algorithm is automatically stopped. return_iterations : boolean, optional Returns instead of a tuple of the final restorated image and used PSF a tuple of all iterations for further investigation clip : boolean, optional True by default. If true, pixel value of the result above 1 or under -1 are thresholded for skimage pipeline compatibility. Returns ------- im_deconv : ndarray The deconvolved image. psf : ndarray The last PSF estimate to deconvolve image Examples -------- >>> from skimage.restoration import blind_richardson_lucy >>> image = np.zeros((100,100)) >>> im[40:60, 45:55] = 1 >>> im[45:55, 40:60] = 1 >>> psf = np.zeros_like(image) >>> psf[50,50] = 1 >>> psf = gaussian(psf, 2) >>> image_conv = convolve2d(image, psf, 'same') >>> deconvolved, calc_psf = blind_richardson_lucy(image_conv, 10) Notes ----- This function estimates a point spread function based on an inverse Richardson Lucy algorithm as described in Fish et al., 1995. It is an iterative process where the PSF and image is deconvolved, respectively. It is more noise tolerant than other algorithms, such as Ayers-Dainty and the Weiner filter algorithms (taken from the paper). The algorithm performs well with gaussian PSFs and can recover them nicely without any prior knowledge. If one has already an educated guess, one should pass the PSF as argument to the function. Note, that the PSF should have the same shape as the image, and the PSF should be centered. Due to its nature, the algorithm may divide by 0. The function catches this issue and aborts the iterative process. Mostly, the optimal number of iterations is before this error may occur. References ---------- .. [1] Fish, D. A., A. M. Brinicombe, E. R. Pike, and J. G. Walker. ""Blind deconvolution by means of the Richardson–Lucy algorithm."" JOSA A 12, no. 1 (1995): 58-65. https://pdfs.semanticscholar.org/9e3f/a71e22caf358dbe873e9649f08c205d0c0c0.pdf """""" if return_iterations: all_iterations = np.empty((iterations, 2,) + image.shape) # Convert image to float for computations image = image.astype(np.float) # Initialize im_deconv and PSF im_deconv = np.full(image.shape, 0.5) if psf is None: psf = np.full(image.shape, 0.5) else: assert psf.shape == image.shape, \ 'Image and PSF should have the same shape!' psf = psf.astype(np.float) for i in range(iterations): # Deconvolve the PSF # Hack: in original publication one would have used `image`, # however, this does not work. # Using `im_deconv` instead recovers PSF. 
relative_blur_psf = im_deconv / fftconvolve(psf, im_deconv, 'same') # Check if relative_blur_psf contains nan, # causing the algorithm to fail if np.count_nonzero(~np.isnan(relative_blur_psf)) \ < relative_blur_psf.size: warnings.warn('Iterations stopped after {} iterations' ' because PSF contains zeros!'.format(i), RuntimeWarning) break else: psf *= fftconvolve(relative_blur_psf, im_deconv[::-1, ::-1], 'same') # Compute inverse again psf_mirror = psf[::-1, ::-1] # Standard Richardson-Lucy deconvolution relative_blur = image / fftconvolve(im_deconv, psf, 'same') im_deconv *= fftconvolve(relative_blur, psf_mirror, 'same') # Add iteration to list, if desired if return_iterations: all_iterations[i, 0] = im_deconv.copy() all_iterations[i, 1] = psf.copy() # Don't know if this makes sense here... if clip: im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv < -1] = -1 if return_iterations: return all_iterations else: return im_deconv, psf ","def blind_richardson_lucy(image, psf=None, iterations=10, return_iterations=False, clip=False): """"""Blind Richardson-Lucy deconvolution. Parameters ---------- image : ndarray Input degraded image (can be N dimensional). psf : ndarray, optional A first estimate of the point spread function, same size as image iterations : int Number of iterations. This parameter plays the role of regularisation. After a given iterations, the estimates can produce division by 0 problems, then the algorithm is automatically stopped. return_iterations : boolean, optional Returns instead of a tuple of the final restorated image and used PSF a tuple of all iterations for further investigation clip : boolean, optional True by default. If true, pixel value of the result above 1 or under -1 are thresholded for skimage pipeline compatibility. Returns ------- im_deconv : ndarray The deconvolved image. psf : ndarray The last PSF estimate to deconvolve image Examples -------- >>> from skimage.restoration import blind_richardson_lucy >>> image = np.zeros((100,100)) >>> im[40:60, 45:55] = 1 >>> im[45:55, 40:60] = 1 >>> psf = np.zeros_like(image) >>> psf[50,50] = 1 >>> psf = gaussian(psf, 2) >>> image_conv = convolve2d(image, psf, 'same') >>> deconvolved, calc_psf = blind_richardson_lucy(image_conv, 10) Notes ----- This function estimates a point spread function based on an inverse Richardson Lucy algorithm as described in Fish et al., 1995. It is an iterative process where the PSF and image is deconvolved, respectively. It is more noise tolerant than other algorithms, such as Ayers-Dainty and the Weiner filter algorithms (taken from the paper). The algorithm performs well with gaussian PSFs and can recover them nicely without any prior knowledge. If one has already an educated guess, one should pass the PSF as argument to the function. Note, that the PSF should have the same shape as the image, and the PSF should be centered. Due to its nature, the algorithm may divide by 0. The function catches this issue and aborts the iterative process. Mostly, the optimal number of iterations is before this error may occur. References ---------- .. [1] Fish, D. A., A. M. Brinicombe, E. R. Pike, and J. G. Walker. ""Blind deconvolution by means of the Richardson–Lucy algorithm."" JOSA A 12, no. 1 (1995): 58-65. 
:DOI:`10.1364/JOSAA.12.000058` https://pdfs.semanticscholar.org/9e3f/a71e22caf358dbe873e9649f08c205d0c0c0.pdf """""" if return_iterations: all_iterations = np.empty((iterations, 2,) + image.shape) # Convert image to float for computations image = image.astype(np.float) # Initialize im_deconv and PSF im_deconv = np.full(image.shape, 0.5) if psf is None: psf = np.full(image.shape, 0.5) else: assert psf.shape == image.shape, \ 'Image and PSF should have the same shape!' psf = psf.astype(np.float) for i in range(iterations): # Deconvolve the PSF # Hack: in original publication one would have used `image`, # however, this does not work. # Using `im_deconv` instead recovers PSF. relative_blur_psf = im_deconv / fftconvolve(psf, im_deconv, 'same') # Check if relative_blur_psf contains nan, # causing the algorithm to fail if np.count_nonzero(~np.isnan(relative_blur_psf)) \ < relative_blur_psf.size: warnings.warn('Iterations stopped after {} iterations' ' because PSF contains zeros!'.format(i), RuntimeWarning) break else: psf *= fftconvolve(relative_blur_psf, im_deconv[::-1, ::-1], 'same') # Compute inverse again psf_mirror = psf[::-1, ::-1] # Standard Richardson-Lucy deconvolution relative_blur = image / fftconvolve(im_deconv, psf, 'same') im_deconv *= fftconvolve(relative_blur, psf_mirror, 'same') # Add iteration to list, if desired if return_iterations: all_iterations[i, 0] = im_deconv.copy() all_iterations[i, 1] = psf.copy() # Don't know if this makes sense here... if clip: im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv < -1] = -1 if return_iterations: return all_iterations else: return im_deconv, psf " 40329,"def test_metrla() -> None: root = '/tmp/metr_la' n_previous_steps = 6 n_future_steps = 6 print(""This may take a couple of minutes..."") dataset = MetrLa(root=root, n_previous_steps=n_previous_steps, n_future_steps=n_future_steps) expected_dataset_len = 34260 # (hard-coded for 6-prev and 6-next) # Sanity checks assert len(dataset.gdrive_ids) == 4 # Path assertions assert dataset.raw_dir == f'{root}/raw' assert dataset.processed_dir == f'{root}/processed' assert dataset.io.dataset_len == expected_dataset_len # Pick a data point data = dataset[0] assert len(data) == 4 # Assert data shapes assert data.x.size() == (6, 207, 1) assert data.y.size() == (6, 207, 1) # Assert COO adjacency matrix shapes assert list(data.edge_index.size()) == [2, 1722] assert list(data.edge_attr.size()) == [1722] shutil.rmtree(root) ","def test_metrla() -> None: root = osp.join('/tmp', 'metr_la') n_previous_steps = 6 n_future_steps = 6 print(""This may take a couple of minutes..."") dataset = MetrLa(root=root, n_previous_steps=n_previous_steps, n_future_steps=n_future_steps) expected_dataset_len = 34260 # (hard-coded for 6-prev and 6-next) # Sanity checks assert len(dataset.gdrive_ids) == 4 # Path assertions assert dataset.raw_dir == f'{root}/raw' assert dataset.processed_dir == f'{root}/processed' assert dataset.io.dataset_len == expected_dataset_len # Pick a data point data = dataset[0] assert len(data) == 4 # Assert data shapes assert data.x.size() == (6, 207, 1) assert data.y.size() == (6, 207, 1) # Assert COO adjacency matrix shapes assert list(data.edge_index.size()) == [2, 1722] assert list(data.edge_attr.size()) == [1722] shutil.rmtree(root) " 40238,"def mesh_split_strip(mesh, edge): """"""Split the srip of faces corresponding to a given edge. Parameters ---------- mesh : :class:`compas.datastructures.Mesh` The input mesh. edge : tuple of int The edge identifying the strip. 
Returns ------- list of int The split vertices in the same order as the edges of the strip. """""" strip = mesh.edge_strip(edge) is_closed = strip[0] == strip[-1] ngons = [] splits = [] for u, v in strip[:-1]: ngons.append(mesh.halfedge_face(u, v)) splits.append(mesh.split_edge(u, v, t=0.5, allow_boundary=True)) if is_closed: splits.append(splits[0]) else: u, v = strip[-1] splits.append(mesh.split_edge(u, v, t=0.5, allow_boundary=True)) for (u, v), ngon in zip(pairwise(splits), ngons): mesh.split_face(ngon, u, v) return splits ","def mesh_split_strip(mesh, edge): """"""Split the strip of faces corresponding to a given edge. Parameters ---------- mesh : :class:`compas.datastructures.Mesh` The input mesh. edge : tuple of int The edge identifying the strip. Returns ------- list of int The split vertices in the same order as the edges of the strip. """""" strip = mesh.edge_strip(edge) is_closed = strip[0] == strip[-1] ngons = [] splits = [] for u, v in strip[:-1]: ngons.append(mesh.halfedge_face(u, v)) splits.append(mesh.split_edge(u, v, t=0.5, allow_boundary=True)) if is_closed: splits.append(splits[0]) else: u, v = strip[-1] splits.append(mesh.split_edge(u, v, t=0.5, allow_boundary=True)) for (u, v), ngon in zip(pairwise(splits), ngons): mesh.split_face(ngon, u, v) return splits " 10648,"def gmap(google_api_key, map_options, **kwargs): ''' Create a new :class:`~bokeh.plotting.gmap.GMap` for plotting. Args: google_api_key (str): Google requires an API key be supplied for maps to function. See: https://developers.google.com/maps/documentation/javascript/get-api-key map_options: (GMapOptions) Configuration specific to a Google Map In addition to the standard :class:`~bokeh.plotting.gmap.GMap` keyword arguments (e.g. ``plot_width`` or ``sizing_mode``), the following additional options can be passed as well: .. bokeh-options:: GMapFigureOptions :module: bokeh.plotting.gmap Returns: GMap Note: The Google API key provided will be stored for the function to work. ''' return GMap(api_key=google_api_key, map_options=map_options, **kwargs) ","def gmap(google_api_key, map_options, **kwargs): ''' Create a new :class:`~bokeh.plotting.gmap.GMap` for plotting. Args: google_api_key (str): Google requires an API key be supplied for maps to function. See: https://developers.google.com/maps/documentation/javascript/get-api-key map_options: (GMapOptions) Configuration specific to a Google Map In addition to the standard :class:`~bokeh.plotting.gmap.GMap` keyword arguments (e.g. ``plot_width`` or ``sizing_mode``), the following additional options can be passed as well: .. bokeh-options:: GMapFigureOptions :module: bokeh.plotting.gmap Returns: GMap The Google API key will be stored in the Bokeh Document JSON. ''' return GMap(api_key=google_api_key, map_options=map_options, **kwargs) " 3685,"def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): """"""Create a (read-only) record array from binary data contained in a string Parameters ---------- datastring : str Binary data contained in a string dtype : data-type, optional Valid dtype for all arrays shape : int or tuple of ints, optional Shape of each array. offset : int, optional Position in the file to start reading from. formats, names, titles, aligned, byteorder : If `dtype` is ``None``, these arguments are passed to `numpy.format_parser` to construct a dtype. See that function for detailed documentation. Returns ------- np.recarray record array consisting of data in datastring. 
Examples -------- >>> a = np.empty(10,dtype='f8,i4,a5') >>> a[5] = (0.5,10,'abcde') >>> b=np.core.records.fromstring(a.tostring(), formats='f8,i4,a5', shape=10, ... byteorder='<') >>> print(b[5]) (0.5, 10, 'abcde') >>> b.shape (10,) """""" if dtype is None and formats is None: raise TypeError(""fromstring() needs a 'dtype' or 'formats' argument"") if dtype is not None: descr = sb.dtype(dtype) else: descr = format_parser(formats, names, titles, aligned, byteorder).dtype itemsize = descr.itemsize # NumPy 1.19.0, 2020-01-01 shape = _deprecate_shape_0_as_None(shape) if shape in (None, -1): shape = (len(datastring) - offset) // itemsize _array = recarray(shape, descr, buf=datastring, offset=offset) return _array ","def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): """"""Create a (read-only) record array from binary data contained in a string Parameters ---------- datastring : bytes-like Binary data contained in a string dtype : data-type, optional Valid dtype for all arrays shape : int or tuple of ints, optional Shape of each array. offset : int, optional Position in the file to start reading from. formats, names, titles, aligned, byteorder : If `dtype` is ``None``, these arguments are passed to `numpy.format_parser` to construct a dtype. See that function for detailed documentation. Returns ------- np.recarray record array consisting of data in datastring. Examples -------- >>> a = np.empty(10,dtype='f8,i4,a5') >>> a[5] = (0.5,10,'abcde') >>> b=np.core.records.fromstring(a.tostring(), formats='f8,i4,a5', shape=10, ... byteorder='<') >>> print(b[5]) (0.5, 10, 'abcde') >>> b.shape (10,) """""" if dtype is None and formats is None: raise TypeError(""fromstring() needs a 'dtype' or 'formats' argument"") if dtype is not None: descr = sb.dtype(dtype) else: descr = format_parser(formats, names, titles, aligned, byteorder).dtype itemsize = descr.itemsize # NumPy 1.19.0, 2020-01-01 shape = _deprecate_shape_0_as_None(shape) if shape in (None, -1): shape = (len(datastring) - offset) // itemsize _array = recarray(shape, descr, buf=datastring, offset=offset) return _array " 44079,"def graph_to_tape(graph: MultiDiGraph) -> QuantumTape: """""" Converts a directed multigraph to the corresponding quantum tape. Args: graph (MultiDiGraph): directed multigraph containing measure to be converted to a tape Returns: tape (QuantumTape): the quantum tape corresponding to the input **Example** Consider the following ... : .. 
code-block:: python from pennylane.transforms import qcut wire_cut_0 = qml.WireCut(wires=0) wire_cut_1 = qml.WireCut(wires=1) multi_wire_cut = qml.WireCut(wires=[0, 1]) with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.apply(wire_cut_0) qml.RY(0.5, wires=0) qml.apply(wire_cut_1) qml.CNOT(wires=[0, 1]) qml.apply(multi_wire_cut) qml.RZ(0.6, wires=1) qml.expval(qml.PauliZ(0)) We can find the subgraphs and corresponding tapes by using: >>> graph = qcut.tape_to_graph(tape) >>> qcut.replace_wire_cut_nodes(graph) >>> subgraphs, communication_graph = qcut.fragment_graph(graph) >>> tapes = [qcut.graph_to_tape(sg) for sg in subgraphs] >>> tapes [, , , , ] """""" wires = Wires.all_wires([n.wires for n in graph.nodes]) ordered_ops = sorted( [(order, op) for op, order in graph.nodes(data=""order"")], key=lambda x: x[0] ) wire_map = {w: w for w in wires} with QuantumTape() as tape: for _, op in ordered_ops: new_wires = [wire_map[w] for w in op.wires] op._wires = Wires(new_wires) # TODO: find a better way to update operation wires apply(op) if isinstance(op, MeasureNode): measured_wire = op.wires[0] new_wire = _find_new_wire(wires) wires += new_wire wire_map[measured_wire] = new_wire return tape ","def graph_to_tape(graph: MultiDiGraph) -> QuantumTape: """""" Converts a directed multigraph to the corresponding quantum tape. Args: graph (MultiDiGraph): directed multigraph containing measure to be converted to a tape Returns: tape (QuantumTape): the quantum tape corresponding to the input **Example** Consider the following ... : .. code-block:: python from pennylane.transforms import qcut wire_cut_0 = qml.WireCut(wires=0) wire_cut_1 = qml.WireCut(wires=1) multi_wire_cut = qml.WireCut(wires=[0, 1]) with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.apply(wire_cut_0) qml.RY(0.5, wires=0) qml.apply(wire_cut_1) qml.CNOT(wires=[0, 1]) qml.apply(multi_wire_cut) qml.RZ(0.6, wires=1) qml.expval(qml.PauliZ(0)) We can find the circuit fragments by using: >>> graph = qcut.tape_to_graph(tape) >>> qcut.replace_wire_cut_nodes(graph) >>> subgraphs, communication_graph = qcut.fragment_graph(graph) >>> tapes = [qcut.graph_to_tape(sg) for sg in subgraphs] >>> tapes [, , , , ] """""" wires = Wires.all_wires([n.wires for n in graph.nodes]) ordered_ops = sorted( [(order, op) for op, order in graph.nodes(data=""order"")], key=lambda x: x[0] ) wire_map = {w: w for w in wires} with QuantumTape() as tape: for _, op in ordered_ops: new_wires = [wire_map[w] for w in op.wires] op._wires = Wires(new_wires) # TODO: find a better way to update operation wires apply(op) if isinstance(op, MeasureNode): measured_wire = op.wires[0] new_wire = _find_new_wire(wires) wires += new_wire wire_map[measured_wire] = new_wire return tape " 6839,"def validate_json(string): try: json.loads(string) except: frappe.throw(_(""Request Body consists of an invalid JSON structure"")) ","def validate_json(string): try: json.loads(string) except TypeError: frappe.throw(_(""Request Body consists of an invalid JSON structure"")) " 50543,"def _to_file( df, filename, driver=None, schema=None, index=None, mode=""w"", crs=None, **kwargs ): """""" Write this GeoDataFrame to an OGR data source A dictionary of supported OGR providers is available via: >>> import fiona >>> fiona.supported_drivers Parameters ---------- df : GeoDataFrame to be written filename : string File path or file handle to write to. driver : string, default None The OGR format driver used to write the vector file. 
If not specified, it attempt to infer it from the file extension. schema : dict, default None If specified, the schema dictionary is passed to Fiona to better control how the file is written. If None, GeoPandas will determine the schema based on each column's dtype index : bool, default None If True, write index into one or more columns (for MultiIndex). Default None writes the index into one or more columns only if the index is named, is a MultiIndex, or has a non-integer data type. If False, no index is written. .. versionadded:: 0.7 Previously the index was not written. mode : string, default 'w' The write mode, 'w' to overwrite the existing file and 'a' to append. Not all drivers support appending. The drivers that support appending are listed in fiona.supported_drivers or https://github.com/Toblerity/Fiona/blob/master/fiona/drvsupport.py crs : pyproj.CRS, default None If specified, the CRS is passed to Fiona to better control how the file is written. If None, GeoPandas will determine the crs based on crs df attribute. The value can be anything accepted by :meth:`pyproj.CRS.from_user_input() `, such as an authority string (eg ""EPSG:4326"") or a WKT string. The *kwargs* are passed to fiona.open and can be used to write to multi-layer data, store data within archives (zip files), etc. The path may specify a fiona VSI scheme. Notes ----- The format drivers will attempt to detect the encoding of your data, but may fail. In this case, the proper encoding can be specified explicitly by using the encoding keyword parameter, e.g. ``encoding='utf-8'``. """""" if index is None: # Determine if index attribute(s) should be saved to file index = list(df.index.names) != [None] or type(df.index) not in ( pd.RangeIndex, pd.Int64Index, ) if index: df = df.reset_index(drop=False) if schema is None: schema = infer_schema(df) if crs: crs = pyproj.CRS.from_user_input(crs) else: crs = df.crs if driver is None: driver = _detect_driver(filename) if driver == ""ESRI Shapefile"" and any([len(c) > 10 for c in df.columns.tolist()]): warnings.warn( ""Column names longer than 10 characters will be truncated when saved to "" ""ESRI Shapefile."", stacklevel=3, ) with fiona_env(): crs_wkt = None try: gdal_version = fiona.env.get_gdal_release_name() except AttributeError: gdal_version = ""2.0.0"" # just assume it is not the latest if LooseVersion(gdal_version) >= LooseVersion(""3.0.0"") and crs: crs_wkt = crs.to_wkt() elif crs: crs_wkt = crs.to_wkt(""WKT1_GDAL"") with fiona.open( filename, mode=mode, driver=driver, crs_wkt=crs_wkt, schema=schema, **kwargs ) as colxn: colxn.writerecords(df.iterfeatures()) ","def _to_file( df, filename, driver=None, schema=None, index=None, mode=""w"", crs=None, **kwargs ): """""" Write this GeoDataFrame to an OGR data source A dictionary of supported OGR providers is available via: >>> import fiona >>> fiona.supported_drivers Parameters ---------- df : GeoDataFrame to be written filename : string File path or file handle to write to. driver : string, default None The OGR format driver used to write the vector file. If not specified, it attempts to infer it from the file extension. If no extension is specified, it saves ESRI Shapefile to a folder. schema : dict, default None If specified, the schema dictionary is passed to Fiona to better control how the file is written. If None, GeoPandas will determine the schema based on each column's dtype index : bool, default None If True, write index into one or more columns (for MultiIndex). 
Default None writes the index into one or more columns only if the index is named, is a MultiIndex, or has a non-integer data type. If False, no index is written. .. versionadded:: 0.7 Previously the index was not written. mode : string, default 'w' The write mode, 'w' to overwrite the existing file and 'a' to append. Not all drivers support appending. The drivers that support appending are listed in fiona.supported_drivers or https://github.com/Toblerity/Fiona/blob/master/fiona/drvsupport.py crs : pyproj.CRS, default None If specified, the CRS is passed to Fiona to better control how the file is written. If None, GeoPandas will determine the crs based on crs df attribute. The value can be anything accepted by :meth:`pyproj.CRS.from_user_input() `, such as an authority string (eg ""EPSG:4326"") or a WKT string. The *kwargs* are passed to fiona.open and can be used to write to multi-layer data, store data within archives (zip files), etc. The path may specify a fiona VSI scheme. Notes ----- The format drivers will attempt to detect the encoding of your data, but may fail. In this case, the proper encoding can be specified explicitly by using the encoding keyword parameter, e.g. ``encoding='utf-8'``. """""" if index is None: # Determine if index attribute(s) should be saved to file index = list(df.index.names) != [None] or type(df.index) not in ( pd.RangeIndex, pd.Int64Index, ) if index: df = df.reset_index(drop=False) if schema is None: schema = infer_schema(df) if crs: crs = pyproj.CRS.from_user_input(crs) else: crs = df.crs if driver is None: driver = _detect_driver(filename) if driver == ""ESRI Shapefile"" and any([len(c) > 10 for c in df.columns.tolist()]): warnings.warn( ""Column names longer than 10 characters will be truncated when saved to "" ""ESRI Shapefile."", stacklevel=3, ) with fiona_env(): crs_wkt = None try: gdal_version = fiona.env.get_gdal_release_name() except AttributeError: gdal_version = ""2.0.0"" # just assume it is not the latest if LooseVersion(gdal_version) >= LooseVersion(""3.0.0"") and crs: crs_wkt = crs.to_wkt() elif crs: crs_wkt = crs.to_wkt(""WKT1_GDAL"") with fiona.open( filename, mode=mode, driver=driver, crs_wkt=crs_wkt, schema=schema, **kwargs ) as colxn: colxn.writerecords(df.iterfeatures()) " 27970,"def get_analyzer_checkers_cmd(clang_version_info, env, plugins, alpha=True, debug=True): """"""Return the checkers list which depends on the used clang version. plugins should be a list of path to clang plugins (so with checkers) Before clang9 alpha and debug checkers were printed by default. Since clang9 there are extra arguments to print the additional checkers. """""" major_version = clang_version_info.major_version command = [] for plugin in plugins: command.extend([""-load"", plugin]) command.append(""-analyzer-checker-help"") # The clang compiler os OSX is a few # relases older that the open source clang release. # The new checker help printig flags are not available there yet. # If the OSX clang will be updated to based on clang v8 # this early return can be removed. if clang_version_info.vendor != ""clang"": return command if alpha and major_version > 8: command.append(""-analyzer-checker-help-alpha"") if debug and major_version > 8: command.append(""-analyzer-checker-help-developer"") return command ","def get_analyzer_checkers_cmd(clang_version_info, env, plugins, alpha=True, debug=True): """"""Return the checkers list which depends on the used clang version. 
plugins should be a list of path to clang plugins (so with checkers) Before clang9 alpha and debug checkers were printed by default. Since clang9 there are extra arguments to print the additional checkers. """""" major_version = clang_version_info.major_version command = [] for plugin in plugins: command.extend([""-load"", plugin]) command.append(""-analyzer-checker-help"") # The clang compiler os OSX is a few # relases older than the open source clang release. # The new checker help printig flags are not available there yet. # If the OSX clang will be updated to based on clang v8 # this early return can be removed. if clang_version_info.vendor != ""clang"": return command if alpha and major_version > 8: command.append(""-analyzer-checker-help-alpha"") if debug and major_version > 8: command.append(""-analyzer-checker-help-developer"") return command " 2156,"def mean_variance_axis(X, axis, weights=None, return_sum_weights=False): """"""Compute mean and variance along an axix on a CSR or CSC matrix Parameters ---------- X : CSR or CSC sparse matrix, shape (n_samples, n_features) Input data. axis : int (either 0 or 1) Axis along which the axis should be computed. weights : ndarray, shape (n_samples,) or (n_features,) | None if axis is set to 0 shape is (n_samples,) or if axis is set to 1 shape is (n_features,). If it is set to None, then samples are equally weighted. return_sum_weights : bool If True, returns the sum of weights seen for each feature if axis=0 or each sample if axis=1. Returns ------- means : float array with shape (n_features,) Feature-wise means variances : float array with shape (n_features,) Feature-wise variances sum_weights : float array with shape (n_features,) Returned if return_sum_weights is True. """""" _raise_error_wrong_axis(axis) if isinstance(X, sp.csr_matrix): if axis == 0: return _csr_mean_var_axis0( X, weights=weights, return_sum_weights=return_sum_weights) else: return _csc_mean_var_axis0( X.T, weights=weights, return_sum_weights=return_sum_weights) elif isinstance(X, sp.csc_matrix): if axis == 0: return _csc_mean_var_axis0( X, weights=weights, return_sum_weights=return_sum_weights) else: return _csr_mean_var_axis0( X.T, weights=weights, return_sum_weights=return_sum_weights) else: _raise_typeerror(X) ","def mean_variance_axis(X, axis, weights=None, return_sum_weights=False): """"""Compute mean and variance along an axix on a CSR or CSC matrix Parameters ---------- X : CSR or CSC sparse matrix, shape (n_samples, n_features) Input data. axis : int (either 0 or 1) Axis along which the axis should be computed. weights : ndarray of shape (n_samples,) or (n_features,), default=None if axis is set to 0 shape is (n_samples,) or if axis is set to 1 shape is (n_features,). If it is set to None, then samples are equally weighted. return_sum_weights : bool If True, returns the sum of weights seen for each feature if axis=0 or each sample if axis=1. Returns ------- means : float array with shape (n_features,) Feature-wise means variances : float array with shape (n_features,) Feature-wise variances sum_weights : float array with shape (n_features,) Returned if return_sum_weights is True. 
"""""" _raise_error_wrong_axis(axis) if isinstance(X, sp.csr_matrix): if axis == 0: return _csr_mean_var_axis0( X, weights=weights, return_sum_weights=return_sum_weights) else: return _csc_mean_var_axis0( X.T, weights=weights, return_sum_weights=return_sum_weights) elif isinstance(X, sp.csc_matrix): if axis == 0: return _csc_mean_var_axis0( X, weights=weights, return_sum_weights=return_sum_weights) else: return _csr_mean_var_axis0( X.T, weights=weights, return_sum_weights=return_sum_weights) else: _raise_typeerror(X) " 43584,"def qaoa_embedding_uniform(n_layers, n_wires, low=0, high=2 * pi, seed=None): r""""""Creates a parameter array for :func:`~.QAOAEmbedding`, drawn from a uniform distribution. Each parameter is drawn uniformly at random \ from between ``low`` and ``high``. The parameters define the the trainable angles of 'ZZ interactions' and the 'local fields'. Args: n_layers (int): number of layers n_wires (int): number of qubits low (float): minimum value of uniform distribution high (float): maximum value of uniform distribution seed (int): seed used in sampling the parameters, makes function call deterministic Returns: array: parameter array """""" if seed is not None: np.random.seed(seed) if n_wires == 1: shp = (n_layers, 1) elif n_wires == 2: shp = (n_layers, 3) else: shp = (n_layers, 2*n_wires) params = np.random.uniform(low=low, high=high, size=shp) return params ","def qaoa_embedding_uniform(n_layers, n_wires, low=0, high=2 * pi, seed=None): r""""""Creates a parameter array for :func:`~.QAOAEmbedding`, drawn from a uniform distribution. Each parameter is drawn uniformly at random from between ``low`` and ``high``. The parameters define the the trainable angles of 'ZZ interactions' and the 'local fields'. Args: n_layers (int): number of layers n_wires (int): number of qubits low (float): minimum value of uniform distribution high (float): maximum value of uniform distribution seed (int): seed used in sampling the parameters, makes function call deterministic Returns: array: parameter array """""" if seed is not None: np.random.seed(seed) if n_wires == 1: shp = (n_layers, 1) elif n_wires == 2: shp = (n_layers, 3) else: shp = (n_layers, 2*n_wires) params = np.random.uniform(low=low, high=high, size=shp) return params " 17424,"def _infer_meta_data(ds, x, y, hue, hue_style, add_guide, funcname): dvars = set(ds.variables.keys()) error_msg = "" must be one of ({:s})"".format("", "".join(dvars)) if x not in dvars: raise ValueError(""x"" + error_msg) if y not in dvars: raise ValueError(""y"" + error_msg) if hue is not None and hue not in dvars: raise ValueError(""hue"" + error_msg) if hue: hue_is_numeric = _is_numeric(ds[hue].values) if hue_style is None: hue_style = ""continuous"" if hue_is_numeric else ""discrete"" if not hue_is_numeric and (hue_style == ""continuous""): raise ValueError( f""Cannot create a colorbar for a non numeric coordinate: {hue}"" ) if add_guide is None or add_guide is True: add_colorbar = True if hue_style == ""continuous"" else False add_legend = True if hue_style == ""discrete"" else False else: add_colorbar = False add_legend = False else: if add_guide is True and not (funcname == ""quiver"" or funcname == ""streamplot""): raise ValueError(""Cannot set add_guide when hue is None."") add_legend = False add_colorbar = False if (add_guide or add_guide is None) and funcname == ""quiver"": add_quiverkey = True if hue: add_colorbar = True if not hue_style: hue_style = ""continuous"" elif hue_style != ""continuous"": raise ValueError( ""hue_style must be 
'continuous' or None for .plot.quiver or "" "".plot.streamplot"" ) else: add_quiverkey = False if (add_guide or add_guide is None) and funcname == ""streamplot"": if hue: add_colorbar = True if not hue_style: hue_style = ""continuous"" elif hue_style != ""continuous"": raise ValueError( ""hue_style must be 'continuous' or None for .plot.quiver or "" "".plot.streamplot"" ) if hue_style is not None and hue_style not in [""discrete"", ""continuous""]: raise ValueError(""hue_style must be either None, 'discrete' or 'continuous'."") if hue: hue_label = label_from_attrs(ds[hue]) hue = ds[hue] else: hue_label = None hue = None return { ""add_colorbar"": add_colorbar, ""add_legend"": add_legend, ""add_quiverkey"": add_quiverkey, ""hue_label"": hue_label, ""hue_style"": hue_style, ""xlabel"": label_from_attrs(ds[x]), ""ylabel"": label_from_attrs(ds[y]), ""hue"": hue, } ","def _infer_meta_data(ds, x, y, hue, hue_style, add_guide, funcname): dvars = set(ds.variables.keys()) error_msg = "" must be one of ({:s})"".format("", "".join(dvars)) if x not in dvars: raise ValueError(""x"" + error_msg) if y not in dvars: raise ValueError(""y"" + error_msg) if hue is not None and hue not in dvars: raise ValueError(""hue"" + error_msg) if hue: hue_is_numeric = _is_numeric(ds[hue].values) if hue_style is None: hue_style = ""continuous"" if hue_is_numeric else ""discrete"" if not hue_is_numeric and (hue_style == ""continuous""): raise ValueError( f""Cannot create a colorbar for a non numeric coordinate: {hue}"" ) if add_guide is None or add_guide is True: add_colorbar = True if hue_style == ""continuous"" else False add_legend = True if hue_style == ""discrete"" else False else: add_colorbar = False add_legend = False else: if add_guide is True and funcname not in (""quiver"", ""streamplot""): raise ValueError(""Cannot set add_guide when hue is None."") add_legend = False add_colorbar = False if (add_guide or add_guide is None) and funcname == ""quiver"": add_quiverkey = True if hue: add_colorbar = True if not hue_style: hue_style = ""continuous"" elif hue_style != ""continuous"": raise ValueError( ""hue_style must be 'continuous' or None for .plot.quiver or "" "".plot.streamplot"" ) else: add_quiverkey = False if (add_guide or add_guide is None) and funcname == ""streamplot"": if hue: add_colorbar = True if not hue_style: hue_style = ""continuous"" elif hue_style != ""continuous"": raise ValueError( ""hue_style must be 'continuous' or None for .plot.quiver or "" "".plot.streamplot"" ) if hue_style is not None and hue_style not in [""discrete"", ""continuous""]: raise ValueError(""hue_style must be either None, 'discrete' or 'continuous'."") if hue: hue_label = label_from_attrs(ds[hue]) hue = ds[hue] else: hue_label = None hue = None return { ""add_colorbar"": add_colorbar, ""add_legend"": add_legend, ""add_quiverkey"": add_quiverkey, ""hue_label"": hue_label, ""hue_style"": hue_style, ""xlabel"": label_from_attrs(ds[x]), ""ylabel"": label_from_attrs(ds[y]), ""hue"": hue, } " 31962,"def get_mailbox_from_incident_labels(labels): """""" Gets the mailbox param from the incident labels. Args: labels (list): th incident labels. Returns: The mailbox label. """""" for label in labels: if label.get('type') == 'Mailbox': return label.get('value') return None ","def get_mailbox_from_incident_labels(labels): """""" Gets the mailbox from which the incident was fetched from the incident labels. Args: labels (list): th incident labels. Returns: The mailbox label. 
"""""" for label in labels: if label.get('type') == 'Mailbox': return label.get('value') return None " 13910,"def _find_excluded_ranges( lines: List[Tuple[int, str]], *, warnings: _ExclusionRangeWarnings, exclude_lines_by_pattern: Optional[str] = None, exclude_branches_by_pattern: Optional[str] = None, exclude_pattern_prefix: str, ) -> Callable[[int], bool]: """""" Scan through all lines to find line ranges and branch ranges covered by exclusion markers. Example: >>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'), ... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')] >>> [exclude_line, exclude_branch] = _find_excluded_ranges( ... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE', ... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX') >>> [lineno for lineno in range(30) if exclude_line(lineno)] [11, 13, 15, 16, 17] >>> [lineno for lineno in range(30) if exclude_branch(lineno)] [21, 23, 25, 26, 27] """""" exclude_lines_by_pattern_regex = None if exclude_lines_by_pattern: exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern) exclude_branches_by_pattern_regex = None if exclude_branches_by_pattern: exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern) # possibly overlapping half-open ranges that are excluded exclude_line_ranges: List[Tuple[int, int]] = [] exclude_branch_ranges: List[Tuple[int, int]] = [] exclusion_stack_line = [] exclusion_stack_branch = [] for lineno, code in lines: if _EXCLUDE_FLAG in code: # process the exclusion marker # # header is a marker name like LCOV or GCOVR # # START flags are added to the exlusion stack # STOP flags remove a marker from the exclusion stack # line exclusion excl_line_pattern = re.compile( ""("" + exclude_pattern_prefix + "")"" + _EXCLUDE_LINE_PATTERN_POSTFIX ) for header, flag in excl_line_pattern.findall(code): if flag == ""LINE"": if exclusion_stack_line: warnings.line_after_start( lineno, f""{header}_EXCL_LINE"", exclusion_stack_line[-1][1] ) else: exclude_line_ranges.append((lineno, lineno + 1)) if flag == ""START"": exclusion_stack_line.append((header, lineno)) elif flag == ""STOP"": if not exclusion_stack_line: warnings.stop_without_start( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) continue start_header, start_lineno = exclusion_stack_line.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f""{start_header}_EXCL_START"", lineno, f""{header}_EXCL_STOP"", ) exclude_line_ranges.append((start_lineno, lineno)) else: # pragma: no cover pass # branche exclusion excl_branch_pattern = re.compile( ""("" + exclude_pattern_prefix + "")"" + _EXCLUDE_BRANCH_PATTERN_POSTFIX ) for header, flag in excl_branch_pattern.findall(code): if flag == ""LINE"": if exclusion_stack_branch: warnings.branch_after_start( lineno, f""{header}_EXCL_LINE"", exclusion_stack_branch[-1][1] ) else: exclude_branch_ranges.append((lineno, lineno + 1)) if flag == ""START"": exclusion_stack_branch.append((header, lineno)) elif flag == ""STOP"": if not exclusion_stack_branch: warnings.stop_without_start( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) continue start_header, start_lineno = exclusion_stack_branch.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f""{start_header}_EXCL_START"", lineno, f""{header}_EXCL_STOP"", ) exclude_branch_ranges.append((start_lineno, lineno)) else: # pragma: 
no cover pass if exclude_lines_by_pattern_regex: if exclude_lines_by_pattern_regex.match(code): exclude_line_ranges.append((lineno, lineno + 1)) if exclude_branches_by_pattern_regex: if exclude_branches_by_pattern_regex.match(code): exclude_branch_ranges.append((lineno, lineno + 1)) for header, lineno in exclusion_stack_line: warnings.start_without_stop( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) for header, lineno in exclusion_stack_branch: warnings.start_without_stop( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) return [ _make_is_in_any_range(exclude_line_ranges), _make_is_in_any_range(exclude_branch_ranges), ] ","def _find_excluded_ranges( lines: List[Tuple[int, str]], *, warnings: _ExclusionRangeWarnings, exclude_lines_by_pattern: Optional[str] = None, exclude_branches_by_pattern: Optional[str] = None, exclude_pattern_prefix: str, ) -> Callable[[int], bool]: """""" Scan through all lines to find line ranges and branch ranges covered by exclusion markers. Example: >>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'), ... (21, '//PREFIX_EXCL_BRANCH_LINE'), (23, '//IGNORE_BRANCH'), (25, '//PREFIX_EXCL_BRANCH_START'), (28, '//PREFIX_EXCL_BRANCH_STOP')] >>> [exclude_line, exclude_branch] = _find_excluded_ranges( ... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE', ... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX') >>> [lineno for lineno in range(30) if exclude_line(lineno)] [11, 13, 15, 16, 17] >>> [lineno for lineno in range(30) if exclude_branch(lineno)] [21, 23, 25, 26, 27] """""" exclude_lines_by_pattern_regex = None if exclude_lines_by_pattern: exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern) exclude_branches_by_pattern_regex = None if exclude_branches_by_pattern: exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern) # possibly overlapping half-open ranges that are excluded exclude_line_ranges: List[Tuple[int, int]] = [] exclude_branch_ranges: List[Tuple[int, int]] = [] exclusion_stack_line = [] exclusion_stack_branch = [] for lineno, code in lines: if _EXCLUDE_FLAG in code: # process the exclusion marker # # header is a marker name like LCOV or GCOVR # # START flags are added to the exlusion stack # STOP flags remove a marker from the exclusion stack # line exclusion excl_line_pattern = re.compile( ""("" + exclude_pattern_prefix + "")"" + _EXCLUDE_LINE_PATTERN_POSTFIX ) for header, flag in excl_line_pattern.findall(code): if flag == ""LINE"": if exclusion_stack_line: warnings.line_after_start( lineno, f""{header}_EXCL_LINE"", exclusion_stack_line[-1][1] ) else: exclude_line_ranges.append((lineno, lineno + 1)) if flag == ""START"": exclusion_stack_line.append((header, lineno)) elif flag == ""STOP"": if not exclusion_stack_line: warnings.stop_without_start( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) continue start_header, start_lineno = exclusion_stack_line.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f""{start_header}_EXCL_START"", lineno, f""{header}_EXCL_STOP"", ) exclude_line_ranges.append((start_lineno, lineno)) else: # pragma: no cover pass # branche exclusion excl_branch_pattern = re.compile( ""("" + exclude_pattern_prefix + "")"" + _EXCLUDE_BRANCH_PATTERN_POSTFIX ) for header, flag in excl_branch_pattern.findall(code): if flag == ""LINE"": if exclusion_stack_branch: warnings.branch_after_start( lineno, f""{header}_EXCL_LINE"", 
exclusion_stack_branch[-1][1] ) else: exclude_branch_ranges.append((lineno, lineno + 1)) if flag == ""START"": exclusion_stack_branch.append((header, lineno)) elif flag == ""STOP"": if not exclusion_stack_branch: warnings.stop_without_start( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) continue start_header, start_lineno = exclusion_stack_branch.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f""{start_header}_EXCL_START"", lineno, f""{header}_EXCL_STOP"", ) exclude_branch_ranges.append((start_lineno, lineno)) else: # pragma: no cover pass if exclude_lines_by_pattern_regex: if exclude_lines_by_pattern_regex.match(code): exclude_line_ranges.append((lineno, lineno + 1)) if exclude_branches_by_pattern_regex: if exclude_branches_by_pattern_regex.match(code): exclude_branch_ranges.append((lineno, lineno + 1)) for header, lineno in exclusion_stack_line: warnings.start_without_stop( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) for header, lineno in exclusion_stack_branch: warnings.start_without_stop( lineno, f""{header}_EXCL_START"", f""{header}_EXCL_STOP"" ) return [ _make_is_in_any_range(exclude_line_ranges), _make_is_in_any_range(exclude_branch_ranges), ] " 27725,"def test_testdir_duplicate_paths(testdir): testdir.makepyfile(""foo"") testdir.makepyfile(""bar"") assert len(testdir.created_files) == 1 ","def test_testdir_duplicate_paths(testdir): testdir.makepyfile(""foo"") testdir.makepyfile(""bar"") assert testdir.created_files == {testdir.tmpdir.join(""test_testdir_duplicate_paths.py"")} " 20532,"def get_parser(): # Mandatory arguments parser = argparse.ArgumentParser( description=""This program takes as input an anatomic image and the spinal cord centerline (or "" ""segmentation), and returns the an image of a straightened spinal cord. Reference: "" ""De Leener B, Mangeat G, Dupont S, Martin AR, Callot V, Stikov N, Fehlings MG, "" ""Cohen-Adad J. Topologically-preserving straightening of spinal cord MRI. J Magn "" ""Reson Imaging. 2017 Oct;46(4):1209-1219"", add_help=None, formatter_class=SmartFormatter, prog=os.path.basename(__file__).strip("".py"")) mandatory = parser.add_argument_group(""MANDATORY ARGUMENTS"") mandatory.add_argument( ""-i"", metavar=Metavar.file, help='Input image with curved spinal cord. Example: ""t2.nii.gz""', required=True) mandatory.add_argument( ""-s"", metavar=Metavar.file, help='Spinal cord centerline (or segmentation) of the input image. To obtain the centerline, you can use ' 'sct_get_centerline. To obtain the segmentation you can use sct_propseg or sct_deepseg_sc. ' 'Example: centerline.nii.gz', required=True) optional = parser.add_argument_group(""OPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit"") optional.add_argument( ""-dest"", metavar=Metavar.file, help=""Spinal cord centerline (or segmentation) of a destination image (which could be "" ""straight or curved). An algorithm scales the length of the input centerline to match that of the "" ""destination centerline. If using -ldisc-input and -ldisc-dest with this parameter, "" ""instead of linear scaling, the source centerline will be non-linearly matched so "" ""that the inter-vertebral discs of the input image will match that of the "" ""destination image. 
This feature is particularly useful for registering to a "" ""template while accounting for disc alignment."", required=False) optional.add_argument( ""-ldisc-input"", metavar=Metavar.file, help=""Labels located at the posterior edge of the intervertebral discs, for the input "" ""image (-i). All disc covering the region of interest should be provided. Exmaple: if "" ""you are interested in levels C2 to C7, then you should provide disc labels 2,3,4,5,"" ""6,7). More details about label creation at "" ""http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/. "" # TODO (Julien) update this link ""This option must be used with the -ldisc-dest parameter."", required=False) optional.add_argument( ""-ldisc-dest"", metavar=Metavar.file, help=""Labels located at the posterior edge of the intervertebral discs, for the destination file (-dest). "" ""The same comments as in -ldisc-input apply. This option must be used with the -ldisc-input parameter."", required=False) optional.add_argument( ""-disable-straight2curved"", action='store_true', help=""Disable straight to curved transformation computation, in case you do not need the "" ""output warping field straight-->curve (faster)."", required=False) optional.add_argument( ""-disable-curved2straight"", action='store_true', help=""Disable curved to straight transformation computation, in case you do not need the "" ""output warping field curve-->straight (faster)."", required=False) optional.add_argument( ""-speed-factor"", metavar=Metavar.float, type=float, help='Acceleration factor for the calculation of the straightening warping field.' ' This speed factor enables an intermediate resampling to a lower resolution, which ' 'decreases the computational time at the cost of lower accuracy.' ' A speed factor of 2 means that the input image will be downsampled by a factor 2 ' 'before calculating the straightening warping field. For example, a 1x1x1 mm^3 image ' 'will be downsampled to 2x2x2 mm3, providing a speed factor of approximately 8.' ' Note that accelerating the straightening process reduces the precision of the ' 'algorithm, and induces undesirable edges effects. Default=1 (no downsampling).', required=False, default=1) optional.add_argument( ""-xy-size"", metavar=Metavar.float, type=float, help='Size of the output FOV in the RL/AP plane, in mm. The resolution of the destination ' 'image is the same as that of the source image (-i). Default: 35.', required=False, default=35.0) optional.add_argument( ""-o"", metavar=Metavar.file, help='Straightened file. By default, the suffix ""_straight"" will be added to the input file name.', required=False, default='') optional.add_argument( ""-ofolder"", metavar=Metavar.folder, help=""Output folder (all outputs will go there)."", action=ActionCreateFolder, required=False, default='./') optional.add_argument( '-centerline-algo', help='Algorithm for centerline fitting. Default: nurbs.', choices=('bspline', 'linear', 'nurbs'), default='nurbs') optional.add_argument( '-centerline-smooth', metavar=Metavar.int, type=int, help='Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}. Default: 10', default=10) optional.add_argument( ""-param"", metavar=Metavar.list, help=""R|Parameters for spinal cord straightening. Separate arguments with \"",\"".\n"" "" - precision: [1.0,inf] Precision factor of straightening, related to the number of slices. Increasing this parameter increases the precision along with increased computational time. 
Not taken into account with hanning fitting method. Default=2\n"" "" - threshold_distance: [0.0,inf] Threshold at which voxels are not considered into displacement. Increase this threshold if the image is blackout around the spinal cord too much. Default=10\n"" "" - accuracy_results: {0, 1} Disable/Enable computation of accuracy results after straightening. Default=0\n"" "" - template_orientation: {0, 1} Disable/Enable orientation of the straight image to be the same as the template. Default=0"", required=False) optional.add_argument( ""-x"", help=""Final interpolation. Default: spline."", choices=(""nn"", ""linear"", ""spline""), default=""spline"") optional.add_argument( '-qc', metavar=Metavar.str, help='The path where the quality control generated content will be saved', default=None) optional.add_argument( '-qc-dataset', metavar=Metavar.str, help='If provided, this string will be mentioned in the QC report as the dataset the ' 'process was run on', default=None) optional.add_argument( '-qc-subject', metavar=Metavar.str, help='If provided, this string will be mentioned in the QC report as the subject the ' 'process was run on', default=None) optional.add_argument( ""-r"", type=int, help=""Remove temporary files."", required=False, choices=(0, 1), default=1) optional.add_argument( ""-v"", type=int, help=""Verbose. 0: nothing, 1: basic, 2: extended."", required=False, choices=(0, 1, 2), default=1) return parser ","def get_parser(): # Mandatory arguments parser = argparse.ArgumentParser( description=""This program takes as input an anatomic image and the spinal cord centerline (or "" ""segmentation), and returns the an image of a straightened spinal cord. Reference: "" ""De Leener B, Mangeat G, Dupont S, Martin AR, Callot V, Stikov N, Fehlings MG, "" ""Cohen-Adad J. Topologically-preserving straightening of spinal cord MRI. J Magn "" ""Reson Imaging. 2017 Oct;46(4):1209-1219"", add_help=None, formatter_class=SmartFormatter, prog=os.path.basename(__file__).strip("".py"")) mandatory = parser.add_argument_group(""MANDATORY ARGUMENTS"") mandatory.add_argument( ""-i"", metavar=Metavar.file, help='Input image with curved spinal cord. Example: ""t2.nii.gz""', required=True) mandatory.add_argument( ""-s"", metavar=Metavar.file, help='Spinal cord centerline (or segmentation) of the input image. To obtain the centerline, you can use ' 'sct_get_centerline. To obtain the segmentation you can use sct_propseg or sct_deepseg_sc. ' 'Example: centerline.nii.gz', required=True) optional = parser.add_argument_group(""OPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit"") optional.add_argument( ""-dest"", metavar=Metavar.file, help=""Spinal cord centerline (or segmentation) of a destination image (which could be "" ""straight or curved). An algorithm scales the length of the input centerline to match that of the "" ""destination centerline. If using -ldisc-input and -ldisc-dest with this parameter, "" ""instead of linear scaling, the source centerline will be non-linearly matched so "" ""that the inter-vertebral discs of the input image will match that of the "" ""destination image. This feature is particularly useful for registering to a "" ""template while accounting for disc alignment."", required=False) optional.add_argument( ""-ldisc-input"", metavar=Metavar.file, help=""Labels located at the posterior edge of the intervertebral discs, for the input "" ""image (-i). All disc covering the region of interest should be provided. 
Exmaple: if "" ""you are interested in levels C2 to C7, then you should provide disc labels 2,3,4,5,"" ""6,7). More details about label creation at "" ""http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/. "" # TODO (Julien) update this link ""This option must be used with the -ldisc-dest parameter."", required=False) optional.add_argument( ""-ldisc-dest"", metavar=Metavar.file, help=""Labels located at the posterior edge of the intervertebral discs, for the destination file (-dest). "" ""The same comments as in -ldisc-input apply. This option must be used with the -ldisc-input parameter."", required=False) optional.add_argument( ""-disable-straight2curved"", action='store_true', help=""Disable straight to curved transformation computation, in case you do not need the "" ""output warping field straight-->curve (faster)."", required=False) optional.add_argument( ""-disable-curved2straight"", action='store_true', help=""Disable curved to straight transformation computation, in case you do not need the "" ""output warping field curve-->straight (faster)."", required=False) optional.add_argument( ""-speed-factor"", metavar=Metavar.float, type=float, help='Acceleration factor for the calculation of the straightening warping field.' ' This speed factor enables an intermediate resampling to a lower resolution, which ' 'decreases the computational time at the cost of lower accuracy.' ' A speed factor of 2 means that the input image will be downsampled by a factor 2 ' 'before calculating the straightening warping field. For example, a 1x1x1 mm^3 image ' 'will be downsampled to 2x2x2 mm3, providing a speed factor of approximately 8.' ' Note that accelerating the straightening process reduces the precision of the ' 'algorithm, and induces undesirable edges effects. Default=1 (no downsampling).', required=False, default=1) optional.add_argument( ""-xy-size"", metavar=Metavar.float, type=float, help='Size of the output FOV in the RL/AP plane, in mm. The resolution of the destination ' 'image is the same as that of the source image (-i). Default: 35.', required=False, default=35.0) optional.add_argument( ""-o"", metavar=Metavar.file, help='Straightened file. By default, the suffix ""_straight"" will be added to the input file name.', required=False, default='') optional.add_argument( ""-ofolder"", metavar=Metavar.folder, help=""Output folder (all outputs will go there)."", action=ActionCreateFolder, required=False, default='./') optional.add_argument( '-centerline-algo', help='Algorithm for centerline fitting. Default: nurbs.', choices=('bspline', 'linear', 'nurbs'), default='nurbs') optional.add_argument( '-centerline-smooth', metavar=Metavar.int, type=int, help='Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}. Default: 10', default=10) optional.add_argument( ""-param"", metavar=Metavar.list, help=""R|Parameters for spinal cord straightening. Separate arguments with \"",\"".\n"" "" - precision: Float [1, inf) Precision factor of straightening, related to the number of slices. Increasing this parameter increases the precision along with increased computational time. Not taken into account with Hanning fitting method. Default=2\n"" "" - threshold_distance: [0.0,inf] Threshold at which voxels are not considered into displacement. Increase this threshold if the image is blackout around the spinal cord too much. Default=10\n"" "" - accuracy_results: {0, 1} Disable/Enable computation of accuracy results after straightening. 
Default=0\n"" "" - template_orientation: {0, 1} Disable/Enable orientation of the straight image to be the same as the template. Default=0"", required=False) optional.add_argument( ""-x"", help=""Final interpolation. Default: spline."", choices=(""nn"", ""linear"", ""spline""), default=""spline"") optional.add_argument( '-qc', metavar=Metavar.str, help='The path where the quality control generated content will be saved', default=None) optional.add_argument( '-qc-dataset', metavar=Metavar.str, help='If provided, this string will be mentioned in the QC report as the dataset the ' 'process was run on', default=None) optional.add_argument( '-qc-subject', metavar=Metavar.str, help='If provided, this string will be mentioned in the QC report as the subject the ' 'process was run on', default=None) optional.add_argument( ""-r"", type=int, help=""Remove temporary files."", required=False, choices=(0, 1), default=1) optional.add_argument( ""-v"", type=int, help=""Verbose. 0: nothing, 1: basic, 2: extended."", required=False, choices=(0, 1, 2), default=1) return parser " 49866,"def first_solar_spectral_correction(pw, airmass_absolute, module_type=None, coefficients=None, min_pw=0.1, max_pw=8): r"""""" Spectral mismatch modifier based on precipitable water and absolute (pressure-adjusted) airmass. Estimates a spectral mismatch modifier :math:`M` representing the effect on module short circuit current of variation in the spectral irradiance. :math:`M` is estimated from absolute (pressure currected) air mass, :math:`AM_a`, and precipitable water, :math:`Pw`, using the following function: .. math:: M = c_1 + c_2 AM_a + c_3 Pw + c_4 AM_a^{0.5} + c_5 Pw^{0.5} + c_6 \frac{AM_a} {Pw^{0.5}} Default coefficients are determined for several cell types with known quantum efficiency curves, by using the Simple Model of the Atmospheric Radiative Transfer of Sunshine (SMARTS) [1]_. Using SMARTS, spectrums are simulated with all combinations of AMa and Pw where: * :math:`0.5 \textrm{cm} <= Pw <= 5 \textrm{cm}` * :math:`1.0 <= AM_a <= 5.0` * Spectral range is limited to that of CMP11 (280 nm to 2800 nm) * spectrum simulated on a plane normal to the sun * All other parameters fixed at G173 standard From these simulated spectra, M is calculated using the known quantum efficiency curves. Multiple linear regression is then applied to fit Eq. 1 to determine the coefficients for each module. Based on the PVLIB Matlab function ``pvl_FSspeccorr`` by Mitchell Lee and Alex Panchula of First Solar, 2016 [2]_. Parameters ---------- pw : array-like atmospheric precipitable water. [cm] airmass_absolute : array-like absolute (pressure-adjusted) airmass. [unitless] min_pw : float, default 0.1 minimum atmospheric precipitable water. Any pw value lower than min_pw is set to min_pw to avoid model divergence. [cm] max_pw : float, default 8 maximum atmospheric precipitable water. Any pw value higher than max_pw is set to NaN to avoid model divergence. [cm] module_type : None or string, default None a string specifying a cell type. Values of 'cdte', 'monosi', 'xsi', 'multisi', and 'polysi' (can be lower or upper case). If provided, module_type selects default coefficients for the following modules: * 'cdte' - First Solar Series 4-2 CdTe module. * 'monosi', 'xsi' - First Solar TetraSun module. * 'multisi', 'polysi' - anonymous multi-crystalline silicon module. * 'cigs' - anonymous copper indium gallium selenide module. * 'asi' - anonymous amorphous silicon module. 
The module used to calculate the spectral correction coefficients corresponds to the Multi-crystalline silicon Manufacturer 2 Model C from [3]_. The spectral response (SR) of CIGS and a-Si modules used to derive coefficients can be found in [4]_ coefficients : None or array-like, default None Allows for entry of user-defined spectral correction coefficients. Coefficients must be of length 6. Derivation of coefficients requires use of SMARTS and PV module quantum efficiency curve. Useful for modeling PV module types which are not included as defaults, or to fine tune the spectral correction to a particular PV module. Note that the parameters for modules with very similar quantum efficiency should be similar, in most cases limiting the need for module specific coefficients. Returns ------- modifier: array-like spectral mismatch factor (unitless) which is can be multiplied with broadband irradiance reaching a module's cells to estimate effective irradiance, i.e., the irradiance that is converted to electrical current. References ---------- .. [1] Gueymard, Christian. SMARTS2: a simple model of the atmospheric radiative transfer of sunshine: algorithms and performance assessment. Cocoa, FL: Florida Solar Energy Center, 1995. .. [2] Lee, Mitchell, and Panchula, Alex. ""Spectral Correction for Photovoltaic Module Performance Based on Air Mass and Precipitable Water."" IEEE Photovoltaic Specialists Conference, Portland, 2016 .. [3] Marion, William F., et al. User's Manual for Data for Validating Models for PV Module Performance. National Renewable Energy Laboratory, 2014. http://www.nrel.gov/docs/fy14osti/61610.pdf .. [4] Schweiger, M. and Hermann, W, Influence of Spectral Effects on Energy Yield of Different PV Modules: Comparison of Pwat and MMF Approach, TUV Rheinland Energy GmbH report 21237296.003, January 2017 """""" # --- Screen Input Data --- # *** Pw *** # Replace Pw Values below 0.1 cm with 0.1 cm to prevent model from # diverging"" pw = np.atleast_1d(pw) pw = pw.astype('float64') if np.min(pw) < min_pw: pw = np.maximum(pw, min_pw) warn('Exceptionally low pw values replaced with {} cm to prevent ' 'model divergence'.format(min_pw)) # Warn user about Pw data that is exceptionally high if np.max(pw) > max_pw: pw[pw > max_pw] = np.nan warn('Exceptionally high pw values replaced by np.nan: ' 'check input data.') # *** AMa *** # Replace Extremely High AM with AM 10 to prevent model divergence # AM > 10 will only occur very close to sunset if np.max(airmass_absolute) > 10: airmass_absolute = np.minimum(airmass_absolute, 10) # Warn user about AMa data that is exceptionally low if np.min(airmass_absolute) < 0.58: warn('Exceptionally low air mass: ' + 'model not intended for extra-terrestrial use') # pvl_absoluteairmass(1,pvl_alt2pres(4340)) = 0.58 Elevation of # Mina Pirquita, Argentian = 4340 m. Highest elevation city with # population over 50,000. 
_coefficients = {} _coefficients['cdte'] = ( 0.86273, -0.038948, -0.012506, 0.098871, 0.084658, -0.0042948) _coefficients['monosi'] = ( 0.85914, -0.020880, -0.0058853, 0.12029, 0.026814, -0.0017810) _coefficients['xsi'] = _coefficients['monosi'] _coefficients['polysi'] = ( 0.84090, -0.027539, -0.0079224, 0.13570, 0.038024, -0.0021218) _coefficients['multisi'] = _coefficients['polysi'] _coefficients['cigs'] = ( 0.85252, -0.022314, -0.0047216, 0.13666, 0.013342, -0.0008945) _coefficients['asi'] = ( 1.12094, -0.047620, -0.0083627, -0.10443, 0.098382, -0.0033818) if module_type is not None and coefficients is None: coefficients = _coefficients[module_type.lower()] elif module_type is None and coefficients is not None: pass elif module_type is None and coefficients is None: raise TypeError('No valid input provided, both module_type and ' + 'coefficients are None') else: raise TypeError('Cannot resolve input, must supply only one of ' + 'module_type and coefficients') # Evaluate Spectral Shift coeff = coefficients ama = airmass_absolute modifier = ( coeff[0] + coeff[1]*ama + coeff[2]*pw + coeff[3]*np.sqrt(ama) + coeff[4]*np.sqrt(pw) + coeff[5]*ama/np.sqrt(pw)) return modifier ","def first_solar_spectral_correction(pw, airmass_absolute, module_type=None, coefficients=None, min_pw=0.1, max_pw=8): r"""""" Spectral mismatch modifier based on precipitable water and absolute (pressure-adjusted) airmass. Estimates a spectral mismatch modifier :math:`M` representing the effect on module short circuit current of variation in the spectral irradiance. :math:`M` is estimated from absolute (pressure currected) air mass, :math:`AM_a`, and precipitable water, :math:`Pw`, using the following function: .. math:: M = c_1 + c_2 AM_a + c_3 Pw + c_4 AM_a^{0.5} + c_5 Pw^{0.5} + c_6 \frac{AM_a} {Pw^{0.5}} Default coefficients are determined for several cell types with known quantum efficiency curves, by using the Simple Model of the Atmospheric Radiative Transfer of Sunshine (SMARTS) [1]_. Using SMARTS, spectrums are simulated with all combinations of AMa and Pw where: * :math:`0.5 \textrm{cm} <= Pw <= 5 \textrm{cm}` * :math:`1.0 <= AM_a <= 5.0` * Spectral range is limited to that of CMP11 (280 nm to 2800 nm) * spectrum simulated on a plane normal to the sun * All other parameters fixed at G173 standard From these simulated spectra, M is calculated using the known quantum efficiency curves. Multiple linear regression is then applied to fit Eq. 1 to determine the coefficients for each module. Based on the PVLIB Matlab function ``pvl_FSspeccorr`` by Mitchell Lee and Alex Panchula of First Solar, 2016 [2]_. Parameters ---------- pw : array-like atmospheric precipitable water. [cm] airmass_absolute : array-like absolute (pressure-adjusted) airmass. [unitless] min_pw : float, default 0.1 minimum atmospheric precipitable water. Any pw value lower than min_pw is set to min_pw to avoid model divergence. [cm] max_pw : float, default 8 maximum atmospheric precipitable water. Any pw value higher than max_pw is set to NaN to avoid model divergence. [cm] module_type : None or string, default None a string specifying a cell type. Values of 'cdte', 'monosi', 'xsi', 'multisi', and 'polysi' (can be lower or upper case). If provided, module_type selects default coefficients for the following modules: * 'cdte' - First Solar Series 4-2 CdTe module. * 'monosi', 'xsi' - First Solar TetraSun module. * 'multisi', 'polysi' - anonymous multi-crystalline silicon module. * 'cigs' - anonymous copper indium gallium selenide module. 
* 'asi' - anonymous amorphous silicon module. The module used to calculate the spectral correction coefficients corresponds to the Multi-crystalline silicon Manufacturer 2 Model C from [3]_. The spectral response (SR) of CIGS and a-Si modules used to derive coefficients can be found in [4]_ coefficients : None or array-like, default None Allows for entry of user-defined spectral correction coefficients. Coefficients must be of length 6. Derivation of coefficients requires use of SMARTS and PV module quantum efficiency curve. Useful for modeling PV module types which are not included as defaults, or to fine tune the spectral correction to a particular PV module. Note that the parameters for modules with very similar quantum efficiency should be similar, in most cases limiting the need for module specific coefficients. Returns ------- modifier: array-like spectral mismatch factor (unitless) which is can be multiplied with broadband irradiance reaching a module's cells to estimate effective irradiance, i.e., the irradiance that is converted to electrical current. References ---------- .. [1] Gueymard, Christian. SMARTS2: a simple model of the atmospheric radiative transfer of sunshine: algorithms and performance assessment. Cocoa, FL: Florida Solar Energy Center, 1995. .. [2] Lee, Mitchell, and Panchula, Alex. ""Spectral Correction for Photovoltaic Module Performance Based on Air Mass and Precipitable Water."" IEEE Photovoltaic Specialists Conference, Portland, 2016 .. [3] Marion, William F., et al. User's Manual for Data for Validating Models for PV Module Performance. National Renewable Energy Laboratory, 2014. http://www.nrel.gov/docs/fy14osti/61610.pdf .. [4] Schweiger, M. and Hermann, W, Influence of Spectral Effects on Energy Yield of Different PV Modules: Comparison of Pwat and MMF Approach, TUV Rheinland Energy GmbH report 21237296.003, January 2017 """""" # --- Screen Input Data --- # *** Pw *** # Replace Pw Values below 0.1 cm with 0.1 cm to prevent model from # diverging"" pw = np.atleast_1d(pw) pw = pw.astype('float64') if np.min(pw) < min_pw: pw = np.maximum(pw, min_pw) warn(f'Exceptionally low pw values replaced with {min_pw} cm to prevent ' 'model divergence') # Warn user about Pw data that is exceptionally high if np.max(pw) > max_pw: pw[pw > max_pw] = np.nan warn('Exceptionally high pw values replaced by np.nan: ' 'check input data.') # *** AMa *** # Replace Extremely High AM with AM 10 to prevent model divergence # AM > 10 will only occur very close to sunset if np.max(airmass_absolute) > 10: airmass_absolute = np.minimum(airmass_absolute, 10) # Warn user about AMa data that is exceptionally low if np.min(airmass_absolute) < 0.58: warn('Exceptionally low air mass: ' + 'model not intended for extra-terrestrial use') # pvl_absoluteairmass(1,pvl_alt2pres(4340)) = 0.58 Elevation of # Mina Pirquita, Argentian = 4340 m. Highest elevation city with # population over 50,000. 
_coefficients = {} _coefficients['cdte'] = ( 0.86273, -0.038948, -0.012506, 0.098871, 0.084658, -0.0042948) _coefficients['monosi'] = ( 0.85914, -0.020880, -0.0058853, 0.12029, 0.026814, -0.0017810) _coefficients['xsi'] = _coefficients['monosi'] _coefficients['polysi'] = ( 0.84090, -0.027539, -0.0079224, 0.13570, 0.038024, -0.0021218) _coefficients['multisi'] = _coefficients['polysi'] _coefficients['cigs'] = ( 0.85252, -0.022314, -0.0047216, 0.13666, 0.013342, -0.0008945) _coefficients['asi'] = ( 1.12094, -0.047620, -0.0083627, -0.10443, 0.098382, -0.0033818) if module_type is not None and coefficients is None: coefficients = _coefficients[module_type.lower()] elif module_type is None and coefficients is not None: pass elif module_type is None and coefficients is None: raise TypeError('No valid input provided, both module_type and ' + 'coefficients are None') else: raise TypeError('Cannot resolve input, must supply only one of ' + 'module_type and coefficients') # Evaluate Spectral Shift coeff = coefficients ama = airmass_absolute modifier = ( coeff[0] + coeff[1]*ama + coeff[2]*pw + coeff[3]*np.sqrt(ama) + coeff[4]*np.sqrt(pw) + coeff[5]*ama/np.sqrt(pw)) return modifier " 13983,"def fanofactor(spiketrains, warn_tolerance=0.1 * pq.ms): r"""""" Evaluates the empirical Fano factor F of the spike counts of a list of `neo.SpikeTrain` objects. Given the vector v containing the observed spike counts (one per spike train) in the time window [t0, t1], F is defined as: .. math:: F := \frac{var(v)}{mean(v)} The Fano factor is typically computed for spike trains representing the activity of the same neuron over different trials. The higher F, the larger the cross-trial non-stationarity. In theory for a time-stationary Poisson process, F=1. Parameters ---------- spiketrains : list List of `neo.SpikeTrain` or `pq.Quantity` or `np.ndarray` or list of spike times for which to compute the Fano factor of spike counts. warn_tolerance : pq.Quantity In case of a list of input neo.SpikeTrains, if their durations vary by more than `warn_tolerence` in their absolute values, throw a waring (see Notes). Default: 0.1 ms. Returns ------- fano : float The Fano factor of the spike counts of the input spike trains. Returns np.NaN if an empty list is specified, or if all spike trains are empty. Raises ------ TypeError If the input spiketrains are neo.SpikeTrain objects, but `warn_tolerance` is not a quantity. Notes ----- The check for the equal duration of the input spike trains is performed only if the input is of type`neo.SpikeTrain`: if you pass a numpy array, please make sure that they all have the same duration manually. 
"""""" # Build array of spike counts (one per spike train) spike_counts = np.array([len(st) for st in spiketrains]) # Compute FF if all(count == 0 for count in spike_counts): # empty list of spiketrains reaches this branch, and NaN is returned return np.nan if all(isinstance(st, neo.SpikeTrain) for st in spiketrains): if not is_time_quantity(warn_tolerance): raise TypeError(""'warn_tolerance' must be a time quantity."") durations = [(st.t_stop - st.t_start).simplified.item() for st in spiketrains] durations_min = min(durations) durations_max = max(durations) if durations_max - durations_min > warn_tolerance.simplified.item(): warnings.warn(""Fano factor calculated for spike trains of "" ""different duration (minimum: {_min}s, maximum "" ""{_max}s)."".format(_min=durations_min, _max=durations_max)) fano = spike_counts.var() / spike_counts.mean() return fano ","def fanofactor(spiketrains, warn_tolerance=0.1 * pq.ms): r"""""" Evaluates the empirical Fano factor F of the spike counts of a list of `neo.SpikeTrain` objects. Given the vector v containing the observed spike counts (one per spike train) in the time window [t0, t1], F is defined as: .. math:: F := \frac{var(v)}{mean(v)} The Fano factor is typically computed for spike trains representing the activity of the same neuron over different trials. The higher F, the larger the cross-trial non-stationarity. In theory for a time-stationary Poisson process, F=1. Parameters ---------- spiketrains : list List of `neo.SpikeTrain` or `pq.Quantity` or `np.ndarray` or list of spike times for which to compute the Fano factor of spike counts. warn_tolerance : pq.Quantity In case of a list of input neo.SpikeTrains, if their durations vary by more than `warn_tolerence` in their absolute values, throw a warning (see Notes). Default: 0.1 ms. Returns ------- fano : float The Fano factor of the spike counts of the input spike trains. Returns np.NaN if an empty list is specified, or if all spike trains are empty. Raises ------ TypeError If the input spiketrains are neo.SpikeTrain objects, but `warn_tolerance` is not a quantity. Notes ----- The check for the equal duration of the input spike trains is performed only if the input is of type`neo.SpikeTrain`: if you pass a numpy array, please make sure that they all have the same duration manually. """""" # Build array of spike counts (one per spike train) spike_counts = np.array([len(st) for st in spiketrains]) # Compute FF if all(count == 0 for count in spike_counts): # empty list of spiketrains reaches this branch, and NaN is returned return np.nan if all(isinstance(st, neo.SpikeTrain) for st in spiketrains): if not is_time_quantity(warn_tolerance): raise TypeError(""'warn_tolerance' must be a time quantity."") durations = [(st.t_stop - st.t_start).simplified.item() for st in spiketrains] durations_min = min(durations) durations_max = max(durations) if durations_max - durations_min > warn_tolerance.simplified.item(): warnings.warn(""Fano factor calculated for spike trains of "" ""different duration (minimum: {_min}s, maximum "" ""{_max}s)."".format(_min=durations_min, _max=durations_max)) fano = spike_counts.var() / spike_counts.mean() return fano " 33752,"def allreduce_multigpu(tensor_list: list, group_name: str = ""default"", op=types.ReduceOp.SUM): """"""Collective allrecue a list of tensors across the group. Args: tensor_list (List[tensor]): list of tensors to be allreduced, each on a GPU. group_name (str): the collective group name to perform allreduce. 
Returns: None """""" if not types.cupy_available(): raise RuntimeError(""Multigpu calls requires NCCL and Cupy."") _check_tensor_list_input(tensor_list) g = _check_and_get_group(group_name) opts = types.AllReduceOptions opts.reduceOp = op g.allreduce(tensor_list, opts) ","def allreduce_multigpu(tensor_list: list, group_name: str = ""default"", op=types.ReduceOp.SUM): """"""Collective allreduce a list of tensors across the group. Args: tensor_list (List[tensor]): list of tensors to be allreduced, each on a GPU. group_name (str): the collective group name to perform allreduce. Returns: None """""" if not types.cupy_available(): raise RuntimeError(""Multigpu calls requires NCCL and Cupy."") _check_tensor_list_input(tensor_list) g = _check_and_get_group(group_name) opts = types.AllReduceOptions opts.reduceOp = op g.allreduce(tensor_list, opts) " 29977,"def proxy_info_from_url(url, method=""http"", noproxy=None): """"""Construct a ProxyInfo from a URL (such as http_proxy env var) """""" url = urlparse.urlparse(url) username = None password = None port = None if ""@"" in url[1]: ident, host_port = url[1].split(""@"", 1) if "":"" in ident: username, password = ident.split("":"", 1) else: password = ident else: host_port = url[1] if "":"" in host_port: host, port = host_port.split("":"", 1) else: host = host_port if port: port = int(port) else: port = dict(https=443, http=80)[method] proxy_type = 3 # socks.PROXY_TYPE_HTTP if len(url.scheme) > 0: _scheme_prefix = url.scheme.lower() if _scheme_prefix in proxy_schemes: proxy_type = proxy_schemes[_scheme_prefix] pi = ProxyInfo( proxy_type=proxy_type, proxy_host=host, proxy_port=port, proxy_user=username or None, proxy_pass=password or None, proxy_headers=None, ) bypass_hosts = [] # If not given an explicit noproxy value, respect values in env vars. if noproxy is None: noproxy = os.environ.get(""no_proxy"", os.environ.get(""NO_PROXY"", """")) # Special case: A single '*' character means all hosts should be bypassed. if noproxy == ""*"": bypass_hosts = AllHosts elif noproxy.strip(): bypass_hosts = noproxy.split("","") bypass_hosts = filter(bool, bypass_hosts) # To exclude empty string. pi.bypass_hosts = bypass_hosts return pi ","def proxy_info_from_url(url, method=""http"", noproxy=None): """"""Construct a ProxyInfo from a URL (such as http_proxy env var) """""" url = urlparse.urlparse(url) username = None password = None port = None if ""@"" in url[1]: ident, host_port = url[1].split(""@"", 1) if "":"" in ident: username, password = ident.split("":"", 1) else: password = ident else: host_port = url[1] if "":"" in host_port: host, port = host_port.split("":"", 1) else: host = host_port if port: port = int(port) else: port = dict(https=443, http=80)[method] proxy_type = proxy_schemes.get(url.scheme.lower(), 3) # socks.PROXY_TYPE_HTTP pi = ProxyInfo( proxy_type=proxy_type, proxy_host=host, proxy_port=port, proxy_user=username or None, proxy_pass=password or None, proxy_headers=None, ) bypass_hosts = [] # If not given an explicit noproxy value, respect values in env vars. if noproxy is None: noproxy = os.environ.get(""no_proxy"", os.environ.get(""NO_PROXY"", """")) # Special case: A single '*' character means all hosts should be bypassed. if noproxy == ""*"": bypass_hosts = AllHosts elif noproxy.strip(): bypass_hosts = noproxy.split("","") bypass_hosts = filter(bool, bypass_hosts) # To exclude empty string. pi.bypass_hosts = bypass_hosts return pi " 32590,"def main(): # Args is always stronger. 
Get last run even stronger demisto_params = demisto.params() | demisto.args() | demisto.getLastRun() token = demisto_params['token']['password'] should_push_events = argToBoolean(demisto_params.get('should_push_events', 'false')) verify = demisto_params['verify'] proxy = demisto_params['proxy'] after = get_timestamp_format(demisto_params.get('after')) client = Client(base_url='https://api.abnormalplatform.com/v1', verify=verify, proxy=proxy, headers={""Authorization"": f""Bearer {token}""}) command = demisto.command() try: threats, last_run = get_threats(client, after) if command == 'test-module': return_results('ok') elif command == 'fetch-events': demisto.setLastRun({'after': last_run}) send_events_to_xsiam(threats, VENDOR, PRODUCT) elif command == 'AbnormalSecurityEventCollector-get-events': command_results = CommandResults( readable_output=tableToMarkdown(f'{VENDOR} - {PRODUCT} events', threats), raw_response=threats, ) return_results(command_results) if should_push_events: send_events_to_xsiam(threats, VENDOR, PRODUCT) except Exception as e: return_error(str(e)) ","def main(): # Args is always stronger. Get last run even stronger demisto_params = demisto.params() | demisto.args() | demisto.getLastRun() token = demisto_params['token']['password'] should_push_events = argToBoolean(demisto_params.get('should_push_events', 'false')) verify = demisto_params['verify'] proxy = demisto_params['proxy'] after = get_timestamp_format(demisto_params.get('after')) client = Client(base_url='https://api.abnormalplatform.com/v1', verify=verify, proxy=proxy, headers={""Authorization"": f""Bearer {token}""}) command = demisto.command() try: threats, last_run = get_threats(client, after) if command == 'test-module': return_results('ok') elif command == 'fetch-events': demisto.setLastRun({'after': last_run}) send_events_to_xsiam(threats, VENDOR, PRODUCT) elif command == 'abnormalsecurity-get-events': command_results = CommandResults( readable_output=tableToMarkdown(f'{VENDOR} - {PRODUCT} events', threats), raw_response=threats, ) return_results(command_results) if should_push_events: send_events_to_xsiam(threats, VENDOR, PRODUCT) except Exception as e: return_error(str(e)) " 32297,"def check_security_rules(topology: Topology, device_filter_string: str = None) -> ConfigurationHygieneCheckResult: """""" Check security rules are configured correctly. :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device """""" return HygieneLookups.check_security_rules(topology, device_filter_str=device_filter_string) ","def check_security_rules(topology: Topology, device_filter_string: Optional[str] = None) -> ConfigurationHygieneCheckResult: """""" Check security rules are configured correctly. 
:param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device """""" return HygieneLookups.check_security_rules(topology, device_filter_str=device_filter_string) " 8868,"def subreddit_sorting(bot, trigger, s, sorting): if sorting == 'new': submissions = list(s.new()) elif sorting == 'top': submissions = list(s.top()) elif sorting == 'hot': submissions = list(s.hot()) elif sorting == 'controversial': submissions = list(s.controversial()) elif sorting == 'gilded': submissions = list(s.gilded()) elif sorting == 'rising': submissions = list(s.rising()) elif sorting == 'sticky': try: submissions = [s.sticky()] except prawcore.exceptions.NotFound: bot.say(""r/"" + s.display_name + "" appears to not have a stickied post!"") return elif sorting == 'random': submissions = [s.random()] or [] else: return if not len(submissions): bot.say(""r/"" + s.display_name + ' ' + sorting + "" appears to have no items!"") return NOLIMIT if sorting != 'sticky': submissions_filter = [] for submission in submissions: if not submission.stickied: submissions_filter.append(submission) submissions = submissions_filter submission = submissions[0] link = ""https://reddit.com/r/"" + s.display_name + ""/comments/"" + str(submission) say_post_info( bot, trigger, re.match(post_url, link).group(1), False, True) ","def subreddit_sorting(bot, trigger, s, sorting): if sorting == 'new': submissions = list(s.new()) elif sorting == 'top': submissions = list(s.top()) elif sorting == 'hot': submissions = list(s.hot()) elif sorting == 'controversial': submissions = list(s.controversial()) elif sorting == 'gilded': submissions = list(s.gilded()) elif sorting == 'rising': submissions = list(s.rising(limit=10)) elif sorting == 'sticky': try: submissions = [s.sticky()] except prawcore.exceptions.NotFound: bot.say(""r/"" + s.display_name + "" appears to not have a stickied post!"") return elif sorting == 'random': submissions = [s.random()] or [] else: return if not len(submissions): bot.say(""r/"" + s.display_name + ' ' + sorting + "" appears to have no items!"") return NOLIMIT if sorting != 'sticky': submissions_filter = [] for submission in submissions: if not submission.stickied: submissions_filter.append(submission) submissions = submissions_filter submission = submissions[0] link = ""https://reddit.com/r/"" + s.display_name + ""/comments/"" + str(submission) say_post_info( bot, trigger, re.match(post_url, link).group(1), False, True) " 45404,"def test_join_pandas(): data = test_data[""int_data""] modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data) df_equals( modin_df.join(pandas_df, lsuffix=""left"", on=""col3""), modin_df.join(modin_df, lsuffix=""left"", on=""col3""), ) ","def test_join_pandas(): data = test_data[""int_data""] modin_df, pandas_df = create_test_dfs(data) df_equals( modin_df.join(pandas_df, lsuffix=""left"", on=""col3""), modin_df.join(modin_df, lsuffix=""left"", on=""col3""), ) " 55029,"def pauli_mult(pauli_1, pauli_2, wire_map=None): """"""Multiply two Pauli words together. Two Pauli operations can be multiplied together by taking the additive OR of their binary symplectic representations. Args: pauli_1 (.Operation): A Pauli word. pauli_2 (.Operation): A Pauli word to multiply with the first one. wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli word as keys, and unique integer labels as their values. 
If no wire map is provided, the map will be constructed from the set of wires acted on by the input Pauli words. Returns: .Operation: The product of pauli_1 and pauli_2 as a Pauli word (ignoring the global phase). **Example** This function enables multiplication of Pauli group elements at the level of Pauli words, rather than matrices. For example, >>> from pennylane.pauli import pauli_mult >>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1) >>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1) >>> product = pauli_mult(pauli_1, pauli_2) >>> print(product) PauliZ(wires=[0]) """""" # If no wire map is specified, generate one from the union of wires # in both Paulis. if wire_map is None: wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels) wire_map = {label: i for i, label in enumerate(wire_labels)} # Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity if are_identical_pauli_words(pauli_1, pauli_2): first_wire = list(wire_map.keys())[0] return Identity(first_wire) # Compute binary symplectic representations pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map) pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map) bin_symp_1 = np.array([int(x) for x in pauli_1_binary]) bin_symp_2 = np.array([int(x) for x in pauli_2_binary]) # Shorthand for bitwise XOR of numpy arrays pauli_product = bin_symp_1 ^ bin_symp_2 return binary_to_pauli(pauli_product, wire_map=wire_map) ","def pauli_mult(pauli_1, pauli_2, wire_map=None): """"""Multiply two Pauli words together. Two Pauli operations can be multiplied together by taking the additive OR of their binary symplectic representations. Args: pauli_1 (.Operation): A Pauli word. pauli_2 (.Operation): A Pauli word to multiply with the first one. wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli word as keys, and unique integer labels as their values. If no wire map is provided, the map will be constructed from the set of wires acted on by the input Pauli words. Returns: .Operation: The product of pauli_1 and pauli_2 as a Pauli word (ignoring the global phase). **Example** This function enables multiplication of Pauli group elements at the level of Pauli words, rather than matrices. For example, >>> from pennylane.pauli import pauli_mult >>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1) >>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1) >>> product = pauli_mult(pauli_1, pauli_2) >>> print(product) PauliZ(wires=[0]) """""" # If no wire map is specified, generate one from the union of wires # in both Paulis. 
if wire_map is None: wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels) wire_map = {label: i for i, label in enumerate(wire_labels)} # Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity if are_identical_pauli_words(pauli_1, pauli_2): first_wire = list(wire_map.keys())[0] return Identity(first_wire) # Compute binary symplectic representations pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map) pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map) bin_symp_1 = np.array([int(x) for x in pauli_1_binary]) bin_symp_2 = np.array([int(x) for x in pauli_2_binary]) # Shorthand for bitwise XOR of numpy arrays pauli_product = bin_symp_1 ^ bin_symp_2 return binary_to_pauli(pauli_product, wire_map=wire_map) " 34658,"def _get_fingerprint_of_config_without_epochs( config: Optional[Dict[Text, Any]], ) -> Text: if not config: return """" copied_config = copy.deepcopy(config) for key in [""pipeline"", ""policies""]: if key in copied_config and copied_config[key]: for p in copied_config[key]: if ""epochs"" in p: del p[""epochs""] return rasa.shared.utils.io.deep_container_fingerprint(copied_config) ","def _get_fingerprint_of_config_without_epochs( config: Optional[Dict[Text, Any]], ) -> Text: if not config: return """" copied_config = copy.deepcopy(config) for key in [""pipeline"", ""policies""]: if copied_config.get(key): for p in copied_config[key]: if ""epochs"" in p: del p[""epochs""] return rasa.shared.utils.io.deep_container_fingerprint(copied_config) " 43803,"def wires_to_edges(graph: nx.Graph) -> Dict[int, Tuple[int]]: r""""""Maps the wires of a register of qubits to corresponding edges. **Example** >>> g = nx.complete_graph(4).to_directed() >>> wires_to_edges(g) {0: (0, 1), 1: (0, 2), 2: (0, 3), 3: (1, 0), 4: (1, 2), 5: (1, 3), 6: (2, 0), 7: (2, 1), 8: (2, 3), 9: (3, 0), 10: (3, 1), 11: (3, 2)} Args: graph (nx.Graph): the graph specifying possible edges Returns: Dict[Tuple[int], int]: a mapping from wires to graph edges """""" return {i: edge for i, edge in enumerate(graph.edges)} ","def wires_to_edges(graph: nx.Graph) -> Dict[int, Tuple[int]]: r""""""Maps the wires of a register of qubits to corresponding edges. **Example** >>> g = nx.complete_graph(4).to_directed() >>> wires_to_edges(g) {0: (0, 1), 1: (0, 2), 2: (0, 3), 3: (1, 0), 4: (1, 2), 5: (1, 3), 6: (2, 0), 7: (2, 1), 8: (2, 3), 9: (3, 0), 10: (3, 1), 11: (3, 2)} Args: graph (nx.Graph): the graph specifying possible edges Returns: Dict[Tuple, int]: a mapping from wires to graph edges """""" return {i: edge for i, edge in enumerate(graph.edges)} " 34883,"def collect_device_info(expr): """"""Collect the device allocation map for the given expression. The device ids are propagated from the `device_copy` operators. Parameters ---------- expr : tvm.relay.Expr The input expression. Returns ------- ret : Dict[tvm.relay.expr, int] A dictionary of tvm.relay.expr to device id mapping. """""" return _ir_pass.CollectDeviceInfo(expr) ","def collect_device_info(expr): """"""Collect the device allocation map for the given expression. The device ids are propagated from the `device_copy` operators. Parameters ---------- expr : tvm.relay.Expr The input expression. Returns ------- ret : Dict[tvm.relay.expr, int] A dictionary mapping tvm.relay.Expr to device id. """""" return _ir_pass.CollectDeviceInfo(expr) " 32907,"def _distributed_tracing_setter(self, value): """"""Deprecated: this method has been deprecated in favor of the configuration system. It will be removed in newer versions of the Tracer. 
"""""" deprecate( ""client.dDDTraceDeprecationWarnings deprecated"", message=""Use the configuration object instead `config.get_from(client)['distributed_tracing']` = value`"", category=V1DeprecationWarning, removal_version=""1.0.0"", ) config.get_from(self)[""distributed_tracing""] = value ","def _distributed_tracing_setter(self, value): """"""Deprecated: this method has been deprecated in favor of the configuration system. It will be removed in newer versions of the Tracer. """""" deprecate( ""client.distributed_tracing deprecated"", message=""Use the configuration object instead `config.get_from(client)['distributed_tracing']` = value`"", category=V1DeprecationWarning, removal_version=""1.0.0"", ) config.get_from(self)[""distributed_tracing""] = value " 26020,"def list_handle(client, marker, num_results, **kwargs): from ..track2_util import list_generator directory_path = kwargs.pop(""directory_name"", None) if directory_path and directory_path.startswith('./'): directory_path = directory_path.replace('./', '', 1) dir_client = client.get_directory_client(directory_path=directory_path) file_name = kwargs.pop(""file_name"", None) if file_name: total_path = directory_path + '/' + file_name if directory_path else file_name dir_client = client.get_directory_client(directory_path=total_path) if not dir_client.exists(): dir_client = client.get_directory_client(directory_path=directory_path) client = dir_client.get_file_client(file_name=file_name) kwargs.pop(""recursive"") else: client = dir_client else: client = dir_client generator = client.list_handles(results_per_page=num_results, **kwargs) pages = generator.by_page(continuation_token=marker) # SharePropertiesPaged result = list_generator(pages=pages, num_results=num_results) if pages.continuation_token: next_marker = {""nextMarker"": pages.continuation_token} result.append(next_marker) return result ","def list_handle(client, marker, num_results, **kwargs): from ..track2_util import list_generator directory_path = kwargs.pop(""directory_name"", None) if directory_path and directory_path.startswith('./'): directory_path = directory_path.replace('./', '', 1) dir_client = client.get_directory_client(directory_path=directory_path) file_name = kwargs.pop(""file_name"", None) if file_name: total_path = directory_path + '/' + file_name if directory_path else file_name dir_client = client.get_directory_client(directory_path=total_path) if not dir_client.exists(): dir_client = client.get_directory_client(directory_path=directory_path) client = dir_client.get_file_client(file_name=file_name) kwargs.pop(""recursive"") else: client = dir_client else: client = dir_client generator = client.list_handles(results_per_page=num_results, **kwargs) pages = generator.by_page(continuation_token=marker) # SharePropertiesPaged result = list_generator(pages=pages, num_results=num_results) return {""item"": result, ""nextMarker"": pages.continuation_token} " 30780,"def fetch_incidents(): last_run = demisto.getLastRun() last_incidents_ids = [] if last_run: last_fetch = last_run.get('time') last_fetch = datetime.strptime(last_fetch, TIME_FORMAT) last_incidents_ids = last_run.get('last_event_ids') else: # first time fetching last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0] LOG('iterating on detections, looking for more recent than {}'.format(last_fetch)) incidents = [] new_incidents_ids = [] for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2): LOG('found detection #{}'.format(raw_detection['id'])) 
incident = detection_to_incident(raw_detection) # the rewJson is a string of dictionary e.g. - ('{""ID"":2,""Type"":5}') incident_id = json.loads(incident['rawJSON']).get(""ID"") if incident_id not in last_incidents_ids: # makes sure that the incidents wasn't fetched before incidents.append(incident) new_incidents_ids.append(incident_id) if incidents: last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812 last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids} return last_run, incidents ","def fetch_incidents(): last_run = demisto.getLastRun() last_incidents_ids = [] if last_run: last_fetch = last_run.get('time') last_fetch = datetime.strptime(last_fetch, TIME_FORMAT) last_incidents_ids = last_run.get('last_event_ids') else: # first time fetching last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0] LOG('iterating on detections, looking for more recent than {}'.format(last_fetch)) incidents = [] new_incidents_ids = [] for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2): LOG('found a new detection in RedCanary #{}'.format(raw_detection['id'])) incident = detection_to_incident(raw_detection) # the rewJson is a string of dictionary e.g. - ('{""ID"":2,""Type"":5}') incident_id = json.loads(incident['rawJSON']).get(""ID"") if incident_id not in last_incidents_ids: # makes sure that the incidents wasn't fetched before incidents.append(incident) new_incidents_ids.append(incident_id) if incidents: last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812 last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids} return last_run, incidents " 52963,"def align(v, alignment): remainder = v % alignment if remainder == 0: return v else: return v + remainder ","def align(v, alignment): remainder = v % alignment if remainder == 0: return v else: return v + (alignment - remainder) " 24111,"def config_proxy_skip(proxies, uri, skip_proxy=False): """""" Returns an amended copy of the proxies dictionary - used by `requests`, it will disable the proxy if the uri provided is to be reached directly. :param proxies dict with existing proxies: 'https', 'http', 'no' as pontential keys :param uri uri to determine if proxy is necessary or not. :param skip_proxy if True, the proxy dictionary returned will disable all proxies """""" parsed_uri = urlparse(uri) # disable proxy if necessary if skip_proxy: proxies['http'] = '' proxies['https'] = '' elif proxies.get('no'): urls = [] if isinstance(proxies['no'], string_types): urls = proxies['no'].replace(';', ',').split("","") elif isinstance(proxies['no'], list): urls = proxies['no'] for url in urls: if url in parsed_uri.netloc: if 'http' in proxies: proxies.pop('http') if 'https' in proxies: proxies.pop('https') return proxies ","def config_proxy_skip(proxies, uri, skip_proxy=False): """""" Returns an amended copy of the proxies dictionary - used by `requests`, it will disable the proxy if the uri provided is to be reached directly. :param proxies dict with existing proxies: 'https', 'http', 'no' as pontential keys :param uri uri to determine if proxy is necessary or not. :param skip_proxy If `True`, the returned proxy dictionary will disable all proxies. 
"""""" parsed_uri = urlparse(uri) # disable proxy if necessary if skip_proxy: proxies['http'] = '' proxies['https'] = '' elif proxies.get('no'): urls = [] if isinstance(proxies['no'], string_types): urls = proxies['no'].replace(';', ',').split("","") elif isinstance(proxies['no'], list): urls = proxies['no'] for url in urls: if url in parsed_uri.netloc: if 'http' in proxies: proxies.pop('http') if 'https' in proxies: proxies.pop('https') return proxies " 19914,"def _repo_ref(tmpdir, repo, ref): # if `ref` is explicitly passed, use it if ref: return repo, ref ref = git.head_rev(repo) # if it exists on disk, we'll try and clone it with the local changes if os.path.exists(repo) and git.has_diff('HEAD', repo=repo): logger.warning('Creating temporary repo with uncommitted changes...') shadow = os.path.join(tmpdir, 'shadow-repo') cmd_output('git', 'clone', repo, shadow) cmd_output('git', 'checkout', ref, '-b', '_pc_tmp', cwd=shadow) idx = git.git_path('index', repo=shadow) objs = git.git_path('objects', repo=shadow) env = dict(os.environ, GIT_INDEX_FILE=idx, GIT_OBJECT_DIRECTORY=objs) staged_files = git.get_staged_files(cwd=repo) if (len(staged_files) > 0): cmd_output('git', 'add', *staged_files, cwd=repo, env=env) cmd_output('git', 'add', '-u', cwd=repo, env=env) git.commit(repo=shadow) return shadow, git.head_rev(shadow) else: return repo, ref ","def _repo_ref(tmpdir, repo, ref): # if `ref` is explicitly passed, use it if ref: return repo, ref ref = git.head_rev(repo) # if it exists on disk, we'll try and clone it with the local changes if os.path.exists(repo) and git.has_diff('HEAD', repo=repo): logger.warning('Creating temporary repo with uncommitted changes...') shadow = os.path.join(tmpdir, 'shadow-repo') cmd_output('git', 'clone', repo, shadow) cmd_output('git', 'checkout', ref, '-b', '_pc_tmp', cwd=shadow) idx = git.git_path('index', repo=shadow) objs = git.git_path('objects', repo=shadow) env = dict(os.environ, GIT_INDEX_FILE=idx, GIT_OBJECT_DIRECTORY=objs) staged_files = git.get_staged_files(cwd=repo) if staged_files: cmd_output('git', 'add', *staged_files, cwd=repo, env=env) cmd_output('git', 'add', '-u', cwd=repo, env=env) git.commit(repo=shadow) return shadow, git.head_rev(shadow) else: return repo, ref " 23037,"def new_da_object(dsk, name, chunks, meta=None, dtype=None): """"""Generic constructor for dask.dataframe objects. Decides the appropriate output class based on the type of `meta` provided. """""" if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta): from ..dataframe.core import new_dd_object assert all(len(c) == 1 for c in chunks[1:]) divisions = [None] * (len(chunks[0]) + 1) return new_dd_object(dsk, name, meta, divisions) else: return Array(dsk, name=name, chunks=chunks, meta=meta, dtype=dtype) ","def new_da_object(dsk, name, chunks, meta=None, dtype=None): """"""Generic constructor for dask.array or dask.dataframe objects. Decides the appropriate output class based on the type of `meta` provided. 
"""""" if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta): from ..dataframe.core import new_dd_object assert all(len(c) == 1 for c in chunks[1:]) divisions = [None] * (len(chunks[0]) + 1) return new_dd_object(dsk, name, meta, divisions) else: return Array(dsk, name=name, chunks=chunks, meta=meta, dtype=dtype) " 48166,"def check_several_dictionaries_keys_values_type(parameter_name_expected_type: list): """""" Function checks if parameters dictionaries exist and raises ValueError exception if their key or value have unexpected type :param parameter_name_expected_type: list with tuples that contain dictionary parameter, name for exception message and expected type """""" for ( parameter, name, expected_key_class, expected_value_class, ) in parameter_name_expected_type: if parameter: check_dictionary_keys_values_type( parameter=parameter, parameter_name=name, expected_key_class=expected_key_class, expected_value_class=expected_value_class, ) ","def check_several_dictionaries_keys_values_type(parameter_name_expected_type: list): """""" Function checks if parameters dictionaries exist and raises ValueError exception if their key or value have unexpected type :param parameter_name_expected_type: list with tuples that contain dictionary parameter, name for exception message and expected type """""" for ( parameter, name, expected_key_class, expected_value_class, ) in parameter_name_expected_type: if parameter is not None: check_dictionary_keys_values_type( parameter=parameter, parameter_name=name, expected_key_class=expected_key_class, expected_value_class=expected_value_class, ) " 53284,"def thermal_speed_coefficients(method: str, ndim: int) -> float: r"""""" Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}` based on the given ``method`` and ``ndim``. (See the `~plasmapy.formulary.parameters.thermal_speed` :ref:`Notes ` section for further details.) Parameters ---------- method : `str` Method to be used for calculating the thermal speed. Valid values are ``""most_probable""``, ``""rms""``, ``""mean_magnitude""``, and ``""nrl""``. ndim : `int` Dimensionality (1D, 2D, 3D) of space in which to calculate thermal speed. Valid values are ``1``, ``2``, or ``3``. Raises ------ `ValueError` If ``method`` or ``ndim`` are not a valid value. Notes ----- For a detailed explanation of the different coefficients used to calculate the therml speed, then look to the :ref:`Notes ` section for `~plasmapy.formulary.parameters.thermal_speed`. The possible return values are listed the table .. table:: Thermal speed :math:`v_{th}` coefficients. :widths: 2 1 1 1 1 :width: 100% +--------------+------------+---------------+---------------+---------------+ | ↓ **method** | **ndim** → | ``1`` | ``2`` | ``3`` | +--------------+------------+---------------+---------------+---------------+ | ``""most_probable""`` | .. math:: | .. math:: | .. math:: | | | 0 | 1 | \sqrt{2} | +--------------+------------+---------------+---------------+---------------+ | ``""rms""`` | .. math:: | .. math:: | .. math:: | | | 1 | \sqrt{2} | \sqrt{3} | +--------------+------------+---------------+---------------+---------------+ | ``""mean_magnitude""`` | .. math:: | .. math:: | .. math:: | | | \sqrt{2/π} | \sqrt{π/2} | \sqrt{8/π} | +--------------+------------+---------------+---------------+---------------+ | ``""nrl""`` | .. 
math:: | | | 1 | +--------------+------------+---------------+---------------+---------------+ Examples -------- >>> thermal_speed_coefficients(method=""most_probable"", ndim=3) 1.414213... """""" _coefficients = { (1, ""most_probable""): 0, (2, ""most_probable""): 1, (3, ""most_probable""): np.sqrt(2), (1, ""rms""): 1, (2, ""rms""): np.sqrt(2), (3, ""rms""): np.sqrt(3), (1, ""mean_magnitude""): np.sqrt(2 / np.pi), (2, ""mean_magnitude""): np.sqrt(np.pi / 2), (3, ""mean_magnitude""): np.sqrt(8 / np.pi), (1, ""nrl""): 1, (2, ""nrl""): 1, (3, ""nrl""): 1, } try: coeff = _coefficients[(ndim, method)] except KeyError: raise ValueError( f""Value for (ndim, method) pair not valid, got '({ndim}, {method})'."" ) return coeff ","def thermal_speed_coefficients(method: str, ndim: int) -> float: r"""""" Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}` based on the given ``method`` and ``ndim``. (See the `~plasmapy.formulary.parameters.thermal_speed` :ref:`Notes ` section for further details.) Parameters ---------- method : `str` Method to be used for calculating the thermal speed. Valid values are ``""most_probable""``, ``""rms""``, ``""mean_magnitude""``, and ``""nrl""``. ndim : `int` Dimensionality (1D, 2D, 3D) of space in which to calculate thermal speed. Valid values are ``1``, ``2``, or ``3``. Raises ------ `ValueError` If ``method`` or ``ndim`` are not a valid value. Notes ----- For a detailed explanation of the different coefficients used to calculate the therml speed, then look to the :ref:`Notes ` section for `~plasmapy.formulary.parameters.thermal_speed`. The possible return values are listed the table .. table:: Thermal speed :math:`v_{th}` coefficients. :widths: 2 1 1 1 1 :width: 100% +--------------+------------+---------------+---------------+---------------+ | ↓ **method** | **ndim** → | ``1`` | ``2`` | ``3`` | +--------------+------------+---------------+---------------+---------------+ | ``""most_probable""`` | .. math:: | .. math:: | .. math:: | | | 0 | 1 | \sqrt{2} | +--------------+------------+---------------+---------------+---------------+ | ``""rms""`` | .. math:: | .. math:: | .. math:: | | | 1 | \sqrt{2} | \sqrt{3} | +--------------+------------+---------------+---------------+---------------+ | ``""mean_magnitude""`` | .. math:: | .. math:: | .. math:: | | | \sqrt{2/π} | \sqrt{π/2} | \sqrt{8/π} | +--------------+------------+---------------+---------------+---------------+ | ``""nrl""`` | .. math:: | | | 1 | +--------------+------------+---------------+---------------+---------------+ Examples -------- >>> thermal_speed_coefficients(method=""most_probable"", ndim=3) 1.414213... 
"""""" _coefficients = { (1, ""most_probable""): 0, (2, ""most_probable""): 1, (3, ""most_probable""): np.sqrt(2), (1, ""rms""): 1, (2, ""rms""): np.sqrt(2), (3, ""rms""): np.sqrt(3), (1, ""mean_magnitude""): np.sqrt(2 / np.pi), (2, ""mean_magnitude""): np.sqrt(np.pi / 2), (3, ""mean_magnitude""): np.sqrt(8 / np.pi), (1, ""nrl""): 1, (2, ""nrl""): 1, (3, ""nrl""): 1, } try: coeff = _coefficients[(ndim, method)] except KeyError: raise ValueError( f""Value for (ndim, method) pair not valid, got '({ndim}, {method})'."" ) return coeff " 33320,"def type_filter(_type, filters, limit=None): _types = [ ""budget_function"", ""budget_subfunction"", ""federal_account"", ""program_activity"", ""object_class"", ""recipient"", ""award"", ""award_category"", ""agency"", ""agency_type"", ""agency_sub"", ] # Validate explorer _type if _type is None: raise InvalidParameterException('Missing Required Request Parameter, ""type"": ""type""') elif _type not in _types: raise InvalidParameterException( ""Type does not have a valid value. "" ""Valid Types: budget_function, budget_subfunction, federal_account, program_activity,"" ""object_class, recipient, award, award_category agency, agency_type, agency_sub"" ) if filters is None: raise InvalidParameterException('Missing Required Request Parameter, ""filters"": { ""filter_options"" }') if ""fy"" not in filters: raise InvalidParameterException('Missing required parameter ""fy"".') if ""quarter"" not in filters and ""period"" not in filters: raise InvalidParameterException('Missing required parameter, provide either ""period"" or ""quarter"".') time_unit = ""quarter"" if ""quarter"" in filters else ""period"" try: fiscal_year = int(filters[""fy""]) if fiscal_year < 1000 or fiscal_year > 9999: raise InvalidParameterException('Incorrect Fiscal Year Parameter, ""fy"": ""YYYY""') except ValueError: raise InvalidParameterException('Incorrect or Missing Fiscal Year Parameter, ""fy"": ""YYYY""') if time_unit == ""quarter"" and filters[""quarter""] not in (""1"", ""2"", ""3"", ""4"", 1, 2, 3, 4): raise InvalidParameterException(""Incorrect value provided for quarter parameter. Must be between 1 and 4"") if time_unit == ""period"" and int(filters[""period""]) not in range(1, 13): raise InvalidParameterException(""Incorrect value provided for period parameter. Must be between 1 and 12"") fiscal_unit = int(filters[time_unit]) if time_unit == ""quarter"": submission_window = DABSSubmissionWindowSchedule.objects.filter( submission_fiscal_year=fiscal_year, submission_fiscal_quarter=fiscal_unit, is_quarter=True, submission_reveal_date__lte=datetime.now(timezone.utc), ).first() else: submission_window = DABSSubmissionWindowSchedule.objects.filter( submission_fiscal_year=fiscal_year, submission_fiscal_month=fiscal_unit, submission_reveal_date__lte=datetime.now(timezone.utc), ).first() if submission_window is None: return {""total"": None} fiscal_date = submission_window.period_end_date fiscal_period = submission_window.submission_fiscal_month # transaction_obligated_amount is summed across all periods in the year up to and including the requested quarter. alt_set = FinancialAccountsByAwards.objects.filter( submission__reporting_fiscal_year=fiscal_year, submission__reporting_fiscal_period__lte=fiscal_period, ).annotate(amount=Sum(""transaction_obligated_amount"")) # obligations_incurred_by_program_object_class_cpe is picked from the final period of the quarter. 
queryset = FinancialAccountsByProgramActivityObjectClass.objects.filter( submission__reporting_fiscal_year=fiscal_year, submission__reporting_fiscal_period=fiscal_period, ).annotate(amount=Sum(""obligations_incurred_by_program_object_class_cpe"")) # Apply filters to queryset results alt_set, queryset = spending_filter(alt_set, queryset, filters, _type) if _type == ""recipient"" or _type == ""award"" or _type == ""award_category"" or _type == ""agency_type"": # Annotate and get explorer _type filtered results exp = Explorer(alt_set, queryset) if _type == ""recipient"": alt_set = exp.recipient() if _type == ""award"": alt_set = exp.award() if _type == ""award_category"": alt_set = exp.award_category() # Total value of filtered results actual_total = 0 for award in alt_set: award[""id""] = str(award[""id""]) if _type in [""award"", ""award_category""]: code = None for code_type in (""piid"", ""fain"", ""uri""): if award[code_type]: code = award[code_type] break for code_type in (""piid"", ""fain"", ""uri""): del award[code_type] award[""code""] = code if _type == ""award"": award[""name""] = code actual_total += award[""total""] result_set = list(alt_set) # we need to get the File B data for the same set of filters, so we re-run the spending_filter but without setting the _type to any of the alt keys. alt_set2, queryset2 = spending_filter(alt_set, queryset, filters, """") expected_total = queryset2.aggregate(total=Sum(""amount""))[""total""] unreported_obj = {""id"": None, ""code"": None, ""type"": _type, ""name"": UNREPORTED_DATA_NAME, ""amount"": None} if not (actual_total is None or expected_total is None): unreported_obj[""amount""] = expected_total - actual_total result_set.append(unreported_obj) actual_total = expected_total result_set = sorted(result_set, key=lambda k: k[""amount""], reverse=True) result_set = result_set[:limit] if _type == ""award"" else result_set results = {""total"": actual_total, ""end_date"": fiscal_date, ""results"": result_set} else: # Annotate and get explorer _type filtered results exp = Explorer(alt_set, queryset) if _type == ""budget_function"": queryset = exp.budget_function() if _type == ""budget_subfunction"": queryset = exp.budget_subfunction() if _type == ""federal_account"": queryset = exp.federal_account() if _type == ""program_activity"": queryset = exp.program_activity() if _type == ""object_class"": queryset = exp.object_class() if _type == ""agency"": queryset = exp.agency() # Actual total value of filtered results actual_total = queryset.aggregate(total=Sum(""amount""))[""total""] or 0 result_set, expected_total = get_unreported_data_obj( queryset=queryset, filters=filters, limit=limit, spending_type=_type, actual_total=actual_total, fiscal_year=fiscal_year, fiscal_period=fiscal_period, ) results = {""total"": expected_total, ""end_date"": fiscal_date, ""results"": result_set} return results ","def type_filter(_type, filters, limit=None): _types = [ ""budget_function"", ""budget_subfunction"", ""federal_account"", ""program_activity"", ""object_class"", ""recipient"", ""award"", ""award_category"", ""agency"", ""agency_type"", ""agency_sub"", ] # Validate explorer _type if _type is None: raise InvalidParameterException('Missing Required Request Parameter, ""type"": ""type""') elif _type not in _types: raise InvalidParameterException( ""Type does not have a valid value. 
"" ""Valid Types: budget_function, budget_subfunction, federal_account, program_activity,"" ""object_class, recipient, award, award_category agency, agency_type, agency_sub"" ) if filters is None: raise InvalidParameterException('Missing Required Request Parameter, ""filters"": { ""filter_options"" }') if ""fy"" not in filters: raise InvalidParameterException('Missing required parameter ""fy"".') if ""quarter"" not in filters and ""period"" not in filters: raise InvalidParameterException('Missing required parameter, provide either ""period"" or ""quarter"".') time_unit = ""quarter"" if ""quarter"" in filters else ""period"" try: fiscal_year = int(filters[""fy""]) if fiscal_year < 1000 or fiscal_year > 9999: raise InvalidParameterException('Incorrect Fiscal Year Parameter, ""fy"": ""YYYY""') except ValueError: raise InvalidParameterException('Incorrect or Missing Fiscal Year Parameter, ""fy"": ""YYYY""') if time_unit == ""quarter"" and filters[""quarter""] not in (""1"", ""2"", ""3"", ""4"", 1, 2, 3, 4): raise InvalidParameterException(""Incorrect value provided for quarter parameter. Must be between 1 and 4"") if time_unit == ""period"" and int(filters[""period""]) not in range(1, 13): raise InvalidParameterException(""Incorrect value provided for period parameter. Must be between 1 and 12"") fiscal_unit = int(filters[time_unit]) if time_unit == ""quarter"": submission_window = DABSSubmissionWindowSchedule.objects.filter( submission_fiscal_year=fiscal_year, submission_fiscal_quarter=fiscal_unit, is_quarter=True, submission_reveal_date__lte=datetime.now(timezone.utc), ).first() else: submission_window = DABSSubmissionWindowSchedule.objects.filter( submission_fiscal_year=fiscal_year, submission_fiscal_month=fiscal_unit, submission_reveal_date__lte=datetime.now(timezone.utc), ).first() if submission_window is None: return {""total"": None} fiscal_date = submission_window.period_end_date fiscal_period = submission_window.submission_fiscal_month # transaction_obligated_amount is summed across all periods in the year up to and including the requested quarter. alt_set = FinancialAccountsByAwards.objects.filter( submission__reporting_fiscal_year=fiscal_year, submission__reporting_fiscal_period__lte=fiscal_period, ).annotate(amount=Sum(""transaction_obligated_amount"")) # obligations_incurred_by_program_object_class_cpe is picked from the final period of the quarter. 
queryset = FinancialAccountsByProgramActivityObjectClass.objects.filter( submission__reporting_fiscal_year=fiscal_year, submission__reporting_fiscal_period=fiscal_period, ).annotate(amount=Sum(""obligations_incurred_by_program_object_class_cpe"")) # Apply filters to queryset results alt_set, queryset = spending_filter(alt_set, queryset, filters, _type) if _type == ""recipient"" or _type == ""award"" or _type == ""award_category"" or _type == ""agency_type"": # Annotate and get explorer _type filtered results exp = Explorer(alt_set, queryset) if _type == ""recipient"": alt_set = exp.recipient() if _type == ""award"": alt_set = exp.award() if _type == ""award_category"": alt_set = exp.award_category() # Total value of filtered results actual_total = 0 for award in alt_set: award[""id""] = str(award[""id""]) if _type in [""award"", ""award_category""]: code = None for code_type in (""piid"", ""fain"", ""uri""): if award[code_type]: code = award[code_type] break for code_type in (""piid"", ""fain"", ""uri""): del award[code_type] award[""code""] = code if _type == ""award"": award[""name""] = code actual_total += award[""total""] result_set = list(alt_set) # we need to get the File B data for the same set of filters, so we re-run the spending_filter but without setting the _type to any of the alt keys. alt_set2, queryset2 = spending_filter(alt_set, queryset, filters, """") expected_total = queryset2.aggregate(total=Sum(""amount""))[""total""] unreported_obj = {""id"": None, ""code"": None, ""type"": _type, ""name"": UNREPORTED_DATA_NAME, ""amount"": None} if not (actual_total is None or expected_total is None): unreported_obj[""amount""] = expected_total - actual_total result_set.append(unreported_obj) actual_total = expected_total result_set.sort(key=lambda k: k[""amount""], reverse=True) result_set = result_set[:limit] if _type == ""award"" else result_set results = {""total"": actual_total, ""end_date"": fiscal_date, ""results"": result_set} else: # Annotate and get explorer _type filtered results exp = Explorer(alt_set, queryset) if _type == ""budget_function"": queryset = exp.budget_function() if _type == ""budget_subfunction"": queryset = exp.budget_subfunction() if _type == ""federal_account"": queryset = exp.federal_account() if _type == ""program_activity"": queryset = exp.program_activity() if _type == ""object_class"": queryset = exp.object_class() if _type == ""agency"": queryset = exp.agency() # Actual total value of filtered results actual_total = queryset.aggregate(total=Sum(""amount""))[""total""] or 0 result_set, expected_total = get_unreported_data_obj( queryset=queryset, filters=filters, limit=limit, spending_type=_type, actual_total=actual_total, fiscal_year=fiscal_year, fiscal_period=fiscal_period, ) results = {""total"": expected_total, ""end_date"": fiscal_date, ""results"": result_set} return results " 57880,"def create_widget_content(entries_data: List[Dict[str, Any]]) -> str: """"""Creates the human readable text for the widget. Args: entries_data (List[Dict[str, Any]]): The data from the RSS feed relevant for the widget. Returns: str: The widget's content. """""" content: str = '' for entry_data in entries_data: content += f'## [{entry_data[""title""]}]({entry_data[""link""]})\n' content += f'_{entry_data[""timestamp""]}_\n' content += f'#### {entry_data[""summary""]}\n' content += '---\n' return content ","def create_widget_content(entries_data: List[Dict[str, Any]]) -> str: """"""Creates the human readable text for the widget. 
Args: entries_data (List[Dict[str, Any]]): The data from the RSS feed relevant for the widget. Returns: str: The widget's content. """""" content: str = '' for entry_data in entries_data: content += TEMPLATE.format(**entry_data) return content " 32485,"def convert_start_fetch_to_milliseconds(fetch_start_time: str): """""" Convert a timestamp string to milliseconds Args:t fetch_start_time (str): First fetch timestamp. Returns: (int): time since (epoch - first_fetch) in milliseconds. """""" date = dateparser.parse(fetch_start_time, settings={'TIMEZONE': 'UTC'}) if date is None: # if d is None it means dateparser failed to parse it raise ValueError(f'Invalid first_fetch format: {fetch_start_time}') return int(date.timestamp() * 1000) ","def convert_start_fetch_to_milliseconds(fetch_start_time: str): """""" Convert a timestamp string to milliseconds Args: fetch_start_time (str): First fetch timestamp. Returns: (int): time since (epoch - first_fetch) in milliseconds. """""" date = dateparser.parse(fetch_start_time, settings={'TIMEZONE': 'UTC'}) if date is None: # if d is None it means dateparser failed to parse it raise ValueError(f'Invalid first_fetch format: {fetch_start_time}') return int(date.timestamp() * 1000) " 54362,"def test_add_package_with_extras_and_whitespace(app): command = app.find(""init"") result = command._parse_requirements([""databases[postgresql, sqlite]""]) assert result[0][""name""] == ""databases"" assert len(result[0][""extras""]) == 2 assert ""postgresql"" in result[0][""extras""] assert ""sqlite"" in result[0][""extras""] ","def test_add_package_with_extras_and_whitespace(tester): result = tester._parse_requirements([""databases[postgresql, sqlite]""]) assert result[0][""name""] == ""databases"" assert len(result[0][""extras""]) == 2 assert ""postgresql"" in result[0][""extras""] assert ""sqlite"" in result[0][""extras""] " 20104,"def no_prog_err(name): logger.error(""{} not available, check your installation"".format(name)) ","def no_prog_err(name): logger.error(""%s not available, check your installation"", name) " 39431,"def download_dipole_efield(load=True): # pragma: no cover """"""Download the electric field dataset as generated by ``emg3d``. For more details see `emg3d Minimum Example `_. Parameters ---------- load : bool, optional Load the dataset after downloading it when ``True``. Set this to ``False`` and only the filename will be returned. Returns ------- pyvista.UniformGrid or str DataSet or filename depending on ``load``. Examples -------- Download the electric field dataset and plot it as a volume. >>> from pyvista import examples >>> grid = examples.download_dipole_efield() >>> field_slice = grid.slice('z') >>> field_slice.plot( ... scalars='efield_fx', ... cpos='xy', ... rng=[1E-15, 1E-5], ... log_scale=True, ... lighting=False, ... ) Plot the electric field volumetrically. >>> import numpy as np >>> from pyvista import examples >>> grid = examples.download_dipole_efield() >>> data = (grid['efield_fx']*2E10) >>> grid['data'] = data >>> grid.plot(volume=True, opacity='sigmoid_5') """""" return _download_and_read('emg3d/dipole/efield.vtk', load=load) ","def download_dipole_efield(load=True): # pragma: no cover """"""Download the electric field dataset as generated by ``emg3d``. For more details see `emg3d Minimum Example `_. Parameters ---------- load : bool, optional Load the dataset after downloading it when ``True``. Set this to ``False`` and only the filename will be returned. 
Returns ------- pyvista.UniformGrid or str DataSet or filename depending on ``load``. Examples -------- Download the electric field dataset and plot a slice of it in the XY plane. >>> from pyvista import examples >>> grid = examples.download_dipole_efield() >>> field_slice = grid.slice('z') >>> field_slice.plot( ... scalars='efield_fx', ... cpos='xy', ... rng=[1E-15, 1E-5], ... log_scale=True, ... lighting=False, ... ) Plot the electric field volumetrically. >>> import numpy as np >>> from pyvista import examples >>> grid = examples.download_dipole_efield() >>> data = (grid['efield_fx']*2E10) >>> grid['data'] = data >>> grid.plot(volume=True, opacity='sigmoid_5') """""" return _download_and_read('emg3d/dipole/efield.vtk', load=load) " 4556,"def load_surface(surface): """"""Loads a surface. Parameters ---------- surface : List or numpy.ndarray or Surface Surface to be loaded. This can be passed as: - a list of two files (valid formats are .gii .gii.gz or Freesurfer specific files such as .orig, .pial, .sphere, .white, .inflated) containing: - a surface mesh geometry - surface data - three Numpy arrays organized in a list with coordinates, faces, and data in this specific order - a length 2 tuple or a namedtuple with the fields ""mesh"" and ""data"" - a length 3 tuple or a namedtuple with the fileds ""coordinates"", ""faces"", and ""data"" - a Surface object with ""mesh"" and ""data"" attributes. Returns -------- surface : Surface With the fields ""mesh"" (Mesh object) and ""data"" (numpy.ndarray). """""" # Handle the case where we received a Surface-like # object of a namedtuple with mesh and data attributes if hasattr(surface, ""mesh"") and hasattr(surface, ""data""): mesh = load_surf_mesh(surface.mesh) data = load_surf_data(surface.data) # Handle the case where we received an object with # coordinates, faces, and data attributes elif(hasattr(surface, ""coordinates"") and hasattr(surface, ""faces"") and hasattr(surface, ""data"")): mesh = load_surf_mesh((surface.coordinates, surface.faces)) data = load_surf_data(surface.data) # At this point, we can have an iterable of length # two or three elif isinstance(surface, (list, tuple, np.ndarray)): if len(surface) == 2: mesh = load_surf_mesh(surface[0]) data = load_surf_data(surface[1]) elif len(surface) == 3: mesh = load_surf_mesh(surface[:2]) data = load_surf_data(surface[2]) else: raise ValueError(""`load_surface` accepts iterables "" ""of length 2 or 3 to define a surface. "" ""You provided a {} of length {}."".format( type(surface), len(surface))) else: raise ValueError(""Wrong parameter `surface` in `load_surface`. "" ""Please refer to the documentation for more information."") return Surface(mesh, data) ","def load_surface(surface): """"""Loads a surface. Parameters ---------- surface : List of numpy.ndarray or Surface Surface to be loaded. This can be passed as: - a list of two files (valid formats are .gii .gii.gz or Freesurfer specific files such as .orig, .pial, .sphere, .white, .inflated) containing: - a surface mesh geometry - surface data - three Numpy arrays organized in a list with coordinates, faces, and data in this specific order - a length 2 tuple or a namedtuple with the fields ""mesh"" and ""data"" - a length 3 tuple or a namedtuple with the fileds ""coordinates"", ""faces"", and ""data"" - a Surface object with ""mesh"" and ""data"" attributes. Returns -------- surface : Surface With the fields ""mesh"" (Mesh object) and ""data"" (numpy.ndarray). 
"""""" # Handle the case where we received a Surface-like # object of a namedtuple with mesh and data attributes if hasattr(surface, ""mesh"") and hasattr(surface, ""data""): mesh = load_surf_mesh(surface.mesh) data = load_surf_data(surface.data) # Handle the case where we received an object with # coordinates, faces, and data attributes elif(hasattr(surface, ""coordinates"") and hasattr(surface, ""faces"") and hasattr(surface, ""data"")): mesh = load_surf_mesh((surface.coordinates, surface.faces)) data = load_surf_data(surface.data) # At this point, we can have an iterable of length # two or three elif isinstance(surface, (list, tuple, np.ndarray)): if len(surface) == 2: mesh = load_surf_mesh(surface[0]) data = load_surf_data(surface[1]) elif len(surface) == 3: mesh = load_surf_mesh(surface[:2]) data = load_surf_data(surface[2]) else: raise ValueError(""`load_surface` accepts iterables "" ""of length 2 or 3 to define a surface. "" ""You provided a {} of length {}."".format( type(surface), len(surface))) else: raise ValueError(""Wrong parameter `surface` in `load_surface`. "" ""Please refer to the documentation for more information."") return Surface(mesh, data) " 58061,"def check_spyware_profiles( topology: Topology, device_filter_string: str = None, minimum_block_severities: str = ""critical,high"", minimum_alert_severities: str = ""medium,low"" ) -> ConfigurationHygieneCheckResult: """""" Checks the configured Anti-spyware profiles to ensure at least one meets best practices. :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device :param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode. :param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode. """""" return HygieneLookups.check_spyware_profiles( topology, device_filter_str=device_filter_string, minimum_block_severities=minimum_block_severities.split("",""), minimum_alert_severities=minimum_alert_severities.split("","") ) ","def check_spyware_profiles( topology: Topology, device_filter_string: str = None, minimum_block_severities: str = ""critical,high"", minimum_alert_severities: str = ""medium,low"" ) -> ConfigurationHygieneCheckResult: """""" Checks the configured Anti-spyware profiles to ensure at least one meets best practices. :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device :param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode. :param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode. 
"""""" return HygieneLookups.check_spyware_profiles( topology, device_filter_str=device_filter_string, minimum_block_severities=argToList(minimum_block_severities), minimum_alert_severities=argToList(minimum_alert_severities) ) " 32818,"def set_http_meta(config, span, method=None, url=None, status_code=None): if method: span.set_tag(http.METHOD, method) if url: span.set_tag(http.URL, url) if status_code: span.set_tag(http.STATUS_CODE, status_code) if 500 <= int(status_code) < 600: span.error = 1 ","def set_http_meta(config, span, method=None, url=None, status_code=None): if method: span.set_tag(http.METHOD, method) if url: span.meta[http.URL] = url if status_code: span.set_tag(http.STATUS_CODE, status_code) if 500 <= int(status_code) < 600: span.error = 1 " 17928,"def build_pat(node_json): # Ici node_json c'est le dossier 'parameters' """"""Construit le dictionnaire de barèmes des cotisations employeur à partir des paramètres de parameters"""""" pat = ParameterNode(""pat"", data={}) # Génère pat commun = ParameterNode(""commun"", data={}) # Génère commun # Réindexation: nouveaux chemins autres = node_json.prelevements_sociaux.autres_taxes_participations_assises_salaires retraites = node_json.prelevements_sociaux.regimes_complementaires_retraite_secteur_prive chom = node_json.prelevements_sociaux.cotisations_regime_assurance_chomage cotiz = node_json.prelevements_sociaux.cotisations_securite_sociale_regime_general public = node_json.prelevements_sociaux.cotisations_secteur_public # Création de commun # Apprentissage commun.children['apprentissage'] = autres.apprentissage.children['apprentissage'] commun.children['apprentissage_add'] = autres.apprentissage.children['apprentissage_add'] commun.children['apprentissage'] = autres.apprentissage.children['apprentissage'] commun.children['apprentissage_alsace_moselle'] = autres.apprentissage.children['apprentissage_alsace_moselle'] # Formation commun.children['formprof_09'] = autres.formation.children['formprof_09'] commun.children['formprof_1019'] = autres.formation.children['formprof_1019'] commun.children['formprof_20'] = autres.formation.children['formprof_20'] # Construction commun.children['construction'] = autres.construction.children['construction_20'] commun.children['seuil'] = autres.construction.children['seuil'] # Reste commun.children.update(chom.assedic.employeur.children) commun.children.update(chom.chomfg.children) commun.children.update(cotiz.csa.bareme.children) # À harmoniser ! commun.children.update(cotiz.famille.bareme.children) # À harmoniser ! commun.children.update(autres.fnal.children) # À harmoniser ! commun.children.update(autres.fin_syndic.children) # À harmoniser ! commun.children.update(cotiz.penibilite.bareme.children) # À harmoniser ! commun.children.update(cotiz.cnav.bareme.employeur.children) # À harmoniser ! commun.children.update(cotiz.mmid.bareme.employeur.children) # À harmoniser ! 
+ Créer params depuis IPP # Réindexation NonCadre # Initialisation noncadre = ParameterNode(""noncadre"", data={}) pat.add_child('noncadre', noncadre) pat.children['noncadre'].children.update(retraites.employeur.noncadre.children) pat.children['noncadre'].children.update(commun.children) # Réindexation Cadre # Initialisation cadre = ParameterNode(""cadre"", data={}) pat.add_child('cadre', cadre) pat.children['cadre'].children.update(retraites.employeur.cadre.children) pat.children['cadre'].children.update(commun.children) # Réindexation Fonc # Initialisation fonc = ParameterNode(""fonc"", data={}) pat.add_child('fonc', fonc) fonc.add_child('colloc', ParameterNode(""colloc"", data={})) fonc.add_child('etat', ParameterNode(""etat"", data={})) fonc.add_child('contract', ParameterNode(""contract"", data={})) # Contractuel pat.children['fonc'].children['contract'] = public.ircantec.employeur pat.children['fonc'].children['contract'].children.update(commun.children) # Etat pat.children['fonc'].children['etat'].children.update(public.mmid.etat.children) pat.children['fonc'].children['etat'].children.update(public.retraite.ati.children) pat.children['fonc'].children['etat'].children.update(public.rafp.employeur.children) pat.children['fonc'].children['etat'].children.update(public.retraite.pension.employeur.children) # Collectivités Locales pat.children['fonc'].children['colloc'].children['hospitaliere'] = public.cnral.employeur.hospitaliere pat.children['fonc'].children['colloc'].children['territoriale'] = public.cnral.employeur.territoriale pat.children['fonc'].children['colloc'].children.update(public.cnral.employeur.children) pat.children['fonc'].children['colloc'].children.update(public.mmid.colloc.children) pat.children['fonc'].children['colloc'].children.update(public.rafp.employeur.children) # Renaming pat.children['prive_non_cadre'] = pat.children.pop('noncadre') pat.children['prive_cadre'] = pat.children.pop('cadre') # Rework commun to deal with public employees for var in [""apprentissage"", ""apprentissage_add"", ""apprentissage_alsace_moselle"", ""assedic"", ""chomfg"", ""construction"", ""maladie"", ""formprof_09"", ""formprof_1019"", ""formprof_20"", ""vieillesse_deplafonnee"", ""vieillesse_plafonnee""]: del commun.children[var] for var in [""apprentissage"", ""apprentissage_add"", ""apprentissage_alsace_moselle"", ""formprof_09"", ""formprof_1019"", ""formprof_20"", ""chomfg"", ""construction"", ""assedic""]: del pat.children['fonc'].children['contract'].children[var] pat.children['fonc'].children['etat'].children.update(commun.children) pat.children['fonc'].children['colloc'].children.update(commun.children) pat.children['etat_t'] = pat.children['fonc'].children['etat'] pat.children['colloc_t'] = pat.children['fonc'].children['colloc'] pat.children['contract'] = pat.children['fonc'].children['contract'] for var in ['etat', 'colloc', 'contract']: del pat.children['fonc'].children[var] # Renaming pat.children['public_titulaire_etat'] = pat.children.pop('etat_t') # del pat.children['public_titulaire_etat'].children['rafp'] pat.children['public_titulaire_territoriale'] = pat.children.pop('colloc_t') pat.children['public_titulaire_hospitaliere'] = copy.deepcopy(pat.children['public_titulaire_territoriale']) for category in ['territoriale', 'hospitaliere']: for name, bareme in pat.children['public_titulaire_' + category].children[category].children.items(): pat.children['public_titulaire_{}'.format(category)].children[name] = bareme for category in ['territoriale', 'hospitaliere']: del 
pat.children['public_titulaire_territoriale'].children[category] del pat.children['public_titulaire_hospitaliere'].children[category] pat.children['public_non_titulaire'] = pat.children.pop('contract') return pat ","def build_pat(node_json): # Ici node_json c'est le dossier 'parameters' """"""Construit le dictionnaire de barèmes des cotisations employeur à partir des paramètres de parameters"""""" pat = ParameterNode(""pat"", data={}) # Génère pat commun = ParameterNode(""commun"", data={}) # Génère commun # Réindexation: nouveaux chemins autres = node_json.prelevements_sociaux.autres_taxes_participations_assises_salaires retraites = node_json.prelevements_sociaux.regimes_complementaires_retraite_secteur_prive chomage = node_json.prelevements_sociaux.cotisations_regime_assurance_chomage cotiz = node_json.prelevements_sociaux.cotisations_securite_sociale_regime_general public = node_json.prelevements_sociaux.cotisations_secteur_public # Création de commun # Apprentissage commun.children['apprentissage'] = autres.apprentissage.children['apprentissage'] commun.children['apprentissage_add'] = autres.apprentissage.children['apprentissage_add'] commun.children['apprentissage'] = autres.apprentissage.children['apprentissage'] commun.children['apprentissage_alsace_moselle'] = autres.apprentissage.children['apprentissage_alsace_moselle'] # Formation commun.children['formprof_09'] = autres.formation.children['formprof_09'] commun.children['formprof_1019'] = autres.formation.children['formprof_1019'] commun.children['formprof_20'] = autres.formation.children['formprof_20'] # Construction commun.children['construction'] = autres.construction.children['construction_20'] commun.children['seuil'] = autres.construction.children['seuil'] # Reste commun.children.update(chom.assedic.employeur.children) commun.children.update(chom.chomfg.children) commun.children.update(cotiz.csa.bareme.children) # À harmoniser ! commun.children.update(cotiz.famille.bareme.children) # À harmoniser ! commun.children.update(autres.fnal.children) # À harmoniser ! commun.children.update(autres.fin_syndic.children) # À harmoniser ! commun.children.update(cotiz.penibilite.bareme.children) # À harmoniser ! commun.children.update(cotiz.cnav.bareme.employeur.children) # À harmoniser ! commun.children.update(cotiz.mmid.bareme.employeur.children) # À harmoniser ! 
+ Créer params depuis IPP # Réindexation NonCadre # Initialisation noncadre = ParameterNode(""noncadre"", data={}) pat.add_child('noncadre', noncadre) pat.children['noncadre'].children.update(retraites.employeur.noncadre.children) pat.children['noncadre'].children.update(commun.children) # Réindexation Cadre # Initialisation cadre = ParameterNode(""cadre"", data={}) pat.add_child('cadre', cadre) pat.children['cadre'].children.update(retraites.employeur.cadre.children) pat.children['cadre'].children.update(commun.children) # Réindexation Fonc # Initialisation fonc = ParameterNode(""fonc"", data={}) pat.add_child('fonc', fonc) fonc.add_child('colloc', ParameterNode(""colloc"", data={})) fonc.add_child('etat', ParameterNode(""etat"", data={})) fonc.add_child('contract', ParameterNode(""contract"", data={})) # Contractuel pat.children['fonc'].children['contract'] = public.ircantec.employeur pat.children['fonc'].children['contract'].children.update(commun.children) # Etat pat.children['fonc'].children['etat'].children.update(public.mmid.etat.children) pat.children['fonc'].children['etat'].children.update(public.retraite.ati.children) pat.children['fonc'].children['etat'].children.update(public.rafp.employeur.children) pat.children['fonc'].children['etat'].children.update(public.retraite.pension.employeur.children) # Collectivités Locales pat.children['fonc'].children['colloc'].children['hospitaliere'] = public.cnral.employeur.hospitaliere pat.children['fonc'].children['colloc'].children['territoriale'] = public.cnral.employeur.territoriale pat.children['fonc'].children['colloc'].children.update(public.cnral.employeur.children) pat.children['fonc'].children['colloc'].children.update(public.mmid.colloc.children) pat.children['fonc'].children['colloc'].children.update(public.rafp.employeur.children) # Renaming pat.children['prive_non_cadre'] = pat.children.pop('noncadre') pat.children['prive_cadre'] = pat.children.pop('cadre') # Rework commun to deal with public employees for var in [""apprentissage"", ""apprentissage_add"", ""apprentissage_alsace_moselle"", ""assedic"", ""chomfg"", ""construction"", ""maladie"", ""formprof_09"", ""formprof_1019"", ""formprof_20"", ""vieillesse_deplafonnee"", ""vieillesse_plafonnee""]: del commun.children[var] for var in [""apprentissage"", ""apprentissage_add"", ""apprentissage_alsace_moselle"", ""formprof_09"", ""formprof_1019"", ""formprof_20"", ""chomfg"", ""construction"", ""assedic""]: del pat.children['fonc'].children['contract'].children[var] pat.children['fonc'].children['etat'].children.update(commun.children) pat.children['fonc'].children['colloc'].children.update(commun.children) pat.children['etat_t'] = pat.children['fonc'].children['etat'] pat.children['colloc_t'] = pat.children['fonc'].children['colloc'] pat.children['contract'] = pat.children['fonc'].children['contract'] for var in ['etat', 'colloc', 'contract']: del pat.children['fonc'].children[var] # Renaming pat.children['public_titulaire_etat'] = pat.children.pop('etat_t') # del pat.children['public_titulaire_etat'].children['rafp'] pat.children['public_titulaire_territoriale'] = pat.children.pop('colloc_t') pat.children['public_titulaire_hospitaliere'] = copy.deepcopy(pat.children['public_titulaire_territoriale']) for category in ['territoriale', 'hospitaliere']: for name, bareme in pat.children['public_titulaire_' + category].children[category].children.items(): pat.children['public_titulaire_{}'.format(category)].children[name] = bareme for category in ['territoriale', 'hospitaliere']: del 
pat.children['public_titulaire_territoriale'].children[category] del pat.children['public_titulaire_hospitaliere'].children[category] pat.children['public_non_titulaire'] = pat.children.pop('contract') return pat " 17384,"def apply_ufunc( func: Callable, *args: Any, input_core_dims: Sequence[Sequence] = None, output_core_dims: Optional[Sequence[Sequence]] = ((),), exclude_dims: AbstractSet = frozenset(), vectorize: bool = False, join: str = ""exact"", dataset_join: str = ""exact"", dataset_fill_value: object = _NO_FILL_VALUE, keep_attrs: bool = False, kwargs: Mapping = None, dask: str = ""forbidden"", output_dtypes: Sequence = None, output_sizes: Mapping[Any, int] = None, # meta: Any = None, ) -> Any: """"""Apply a vectorized function for unlabeled arrays on xarray objects. The function will be mapped over the data variable(s) of the input arguments using xarray's standard rules for labeled computation, including alignment, broadcasting, looping over GroupBy/Dataset variables, and merging of coordinates. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on unlabeled arrays (``.data``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars Mix of labeled and/or unlabeled arrays to which to apply the function. input_core_dims : Sequence[Sequence], optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. By default, we assume there are no core dimensions on any input arguments. For example, ``input_core_dims=[[], ['time']]`` indicates that all dimensions on the first argument and all dimensions other than 'time' on the second argument should be broadcast. Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. output_core_dims : List[tuple], optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` outputs exactly one array, with axes corresponding to each broadcast dimension. Core dimensions are assumed to appear as the last dimensions of each output in the provided order. exclude_dims : set, optional Core dimensions on the inputs to exclude from alignment and broadcasting entirely. Any input coordinates along these dimensions will be dropped. Each excluded dimension must also appear in ``input_core_dims`` for at least one argument. Only dimensions listed here are allowed to change size between input and output objects. vectorize : bool, optional If True, then assume ``func`` only takes arrays defined over core dimensions as input and vectorize it automatically with :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. Using this option requires NumPy version 1.12 or newer. 
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining variables of Dataset objects with mismatched data variables. - 'outer': take variables from both Dataset objects - 'inner': take only overlapped variables - 'left': take only variables from the first object - 'right': take only variables from the last object - 'exact': data variables on all Dataset objects must match exactly dataset_fill_value : optional Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. keep_attrs: boolean, Optional Whether to copy attributes from the first argument to the output. kwargs: dict, optional Optional keyword arguments passed directly on to call ``func``. dask: 'forbidden', 'allowed' or 'parallelized', optional How to handle applying to objects containing lazy data in the form of dask arrays: - 'forbidden' (default): raise an error if a dask array is encountered. - 'allowed': pass dask arrays directly on to ``func``. - 'parallelized': automatically parallelize ``func`` if any of the inputs are a dask array. If used, the ``output_dtypes`` argument must also be provided. Multiple output arguments are supported. output_dtypes : list of dtypes, optional Optional list of output dtypes. Only used if dask='parallelized'. output_sizes : dict, optional Optional mapping from dimension names to sizes for outputs. Only used if dask='parallelized' and new dimensions (not found on inputs) appear on outputs. meta : optional Size-0 object representing the type of array wrapped by dask array. Passed on to ``dask.array.blockwise``. Returns ------- Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or numpy.ndarray, the first type on that list to appear on an input. Examples -------- Calculate the vector magnitude of two arguments: >>> def magnitude(a, b): ... func = lambda x, y: np.sqrt(x ** 2 + y ** 2) ... 
return xr.apply_ufunc(func, a, b) You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset`` objects, with automatically preserved dimensions and coordinates, e.g., >>> array = xr.DataArray([1, 2, 3], coords=[(""x"", [0.1, 0.2, 0.3])]) >>> magnitude(array, -array) array([1.414214, 2.828427, 4.242641]) Coordinates: * x (x) float64 0.1 0.2 0.3 Plain scalars, numpy arrays and a mix of these with xarray objects is also supported: >>> magnitude(3, 4) 5.0 >>> magnitude(3, np.array([0, 4])) array([3., 5.]) >>> magnitude(array, 0) array([1., 2., 3.]) Coordinates: * x (x) float64 0.1 0.2 0.3 Other examples of how you could use ``apply_ufunc`` to write functions to (very nearly) replicate existing xarray functionality: Compute the mean (``.mean``) over one dimension:: def mean(obj, dim): # note: apply always moves core dimensions to the end return apply_ufunc(np.mean, obj, input_core_dims=[[dim]], kwargs={'axis': -1}) Inner product over a specific dimension (like ``xr.dot``):: def _inner(x, y): result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis]) return result[..., 0, 0] def inner_product(a, b, dim): return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]]) Stack objects along a new dimension (like ``xr.concat``):: def stack(objects, dim, new_coord): # note: this version does not stack coordinates func = lambda *x: np.stack(x, axis=-1) result = apply_ufunc(func, *objects, output_core_dims=[[dim]], join='outer', dataset_fill_value=np.nan) result[dim] = new_coord return result If your function is not vectorized but can be applied only to core dimensions, you can use ``vectorize=True`` to turn into a vectorized function. This wraps :py:func:`numpy.vectorize`, so the operation isn't terribly fast. Here we'll use it to calculate the distance between empirical samples from two probability distributions, using a scipy function that needs to be applied to vectors:: import scipy.stats def earth_mover_distance(first_samples, second_samples, dim='ensemble'): return apply_ufunc(scipy.stats.wasserstein_distance, first_samples, second_samples, input_core_dims=[[dim], [dim]], vectorize=True) Most of NumPy's builtin functions already broadcast their inputs appropriately for use in `apply`. You may find helper functions such as numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also works well with numba's vectorize and guvectorize. Further explanation with examples are provided in the xarray documentation [3]_. See also -------- numpy.broadcast_arrays numba.vectorize numba.guvectorize References ---------- .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html .. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation """""" from .groupby import GroupBy from .dataarray import DataArray from .variable import Variable if input_core_dims is None: input_core_dims = ((),) * (len(args)) elif len(input_core_dims) != len(args): raise ValueError( ""input_core_dims must be None or a tuple with the length same to "" ""the number of arguments. 
Given input_core_dims: {}, "" ""number of args: {}."".format(input_core_dims, len(args)) ) if kwargs is None: kwargs = {} signature = _UFuncSignature(input_core_dims, output_core_dims) if exclude_dims and not exclude_dims <= signature.all_core_dims: raise ValueError( ""each dimension in `exclude_dims` must also be a "" ""core dimension in the function signature"" ) if kwargs: func = functools.partial(func, **kwargs) variables_vfunc = functools.partial( apply_variable_ufunc, func, signature=signature, exclude_dims=exclude_dims, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, output_dtypes=output_dtypes, output_sizes=output_sizes, ) # feed groupby-apply_ufunc through apply_groupby_func if any(isinstance(a, GroupBy) for a in args): this_apply = functools.partial( apply_ufunc, func, input_core_dims=input_core_dims, output_core_dims=output_core_dims, exclude_dims=exclude_dims, join=join, dataset_join=dataset_join, dataset_fill_value=dataset_fill_value, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, ) return apply_groupby_func(this_apply, *args) # feed datasets apply_variable_ufunc trough apply_dataset_vfunc elif any(is_dict_like(a) for a in args): return apply_dataset_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, dataset_join=dataset_join, fill_value=dataset_fill_value, keep_attrs=keep_attrs, ) # feed DataArray apply_variable_ufunc through apply_dataarray_vfunc elif any(isinstance(a, DataArray) for a in args): return apply_dataarray_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, keep_attrs=keep_attrs, ) # feed Variables directly through apply_variable_ufunc elif any(isinstance(a, Variable) for a in args): return variables_vfunc(*args) else: # feed anything else through apply_array_ufunc return apply_array_ufunc(func, *args, dask=dask) ","def apply_ufunc( func: Callable, *args: Any, input_core_dims: Sequence[Sequence] = None, output_core_dims: Optional[Sequence[Sequence]] = ((),), exclude_dims: AbstractSet = frozenset(), vectorize: bool = False, join: str = ""exact"", dataset_join: str = ""exact"", dataset_fill_value: object = _NO_FILL_VALUE, keep_attrs: bool = False, kwargs: Mapping = None, dask: str = ""forbidden"", output_dtypes: Sequence = None, output_sizes: Mapping[Any, int] = None, # meta: Any = None, ) -> Any: """"""Apply a vectorized function for unlabeled arrays on xarray objects. The function will be mapped over the data variable(s) of the input arguments using xarray's standard rules for labeled computation, including alignment, broadcasting, looping over GroupBy/Dataset variables, and merging of coordinates. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on unlabeled arrays (``.data``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars Mix of labeled and/or unlabeled arrays to which to apply the function. input_core_dims : Sequence[Sequence], optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. By default, we assume there are no core dimensions on any input arguments. 
For example, ``input_core_dims=[[], ['time']]`` indicates that all dimensions on the first argument and all dimensions other than 'time' on the second argument should be broadcast. Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. output_core_dims : List[tuple], optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` outputs exactly one array, with axes corresponding to each broadcast dimension. Core dimensions are assumed to appear as the last dimensions of each output in the provided order. exclude_dims : set, optional Core dimensions on the inputs to exclude from alignment and broadcasting entirely. Any input coordinates along these dimensions will be dropped. Each excluded dimension must also appear in ``input_core_dims`` for at least one argument. Only dimensions listed here are allowed to change size between input and output objects. vectorize : bool, optional If True, then assume ``func`` only takes arrays defined over core dimensions as input and vectorize it automatically with :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. Using this option requires NumPy version 1.12 or newer. join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining variables of Dataset objects with mismatched data variables. - 'outer': take variables from both Dataset objects - 'inner': take only overlapped variables - 'left': take only variables from the first object - 'right': take only variables from the last object - 'exact': data variables on all Dataset objects must match exactly dataset_fill_value : optional Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. keep_attrs: boolean, Optional Whether to copy attributes from the first argument to the output. kwargs: dict, optional Optional keyword arguments passed directly on to call ``func``. dask: 'forbidden', 'allowed' or 'parallelized', optional How to handle applying to objects containing lazy data in the form of dask arrays: - 'forbidden' (default): raise an error if a dask array is encountered. - 'allowed': pass dask arrays directly on to ``func``. Prefer this option if ``func`` natively supports dask arrays. - 'parallelized': automatically parallelize ``func`` if any of the inputs are a dask array. If used, the ``output_dtypes`` argument must also be provided. Multiple output arguments are supported. output_dtypes : list of dtypes, optional Optional list of output dtypes. Only used if dask='parallelized'. output_sizes : dict, optional Optional mapping from dimension names to sizes for outputs. 
Only used if dask='parallelized' and new dimensions (not found on inputs) appear on outputs. meta : optional Size-0 object representing the type of array wrapped by dask array. Passed on to ``dask.array.blockwise``. Returns ------- Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or numpy.ndarray, the first type on that list to appear on an input. Examples -------- Calculate the vector magnitude of two arguments: >>> def magnitude(a, b): ... func = lambda x, y: np.sqrt(x ** 2 + y ** 2) ... return xr.apply_ufunc(func, a, b) You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset`` objects, with automatically preserved dimensions and coordinates, e.g., >>> array = xr.DataArray([1, 2, 3], coords=[(""x"", [0.1, 0.2, 0.3])]) >>> magnitude(array, -array) array([1.414214, 2.828427, 4.242641]) Coordinates: * x (x) float64 0.1 0.2 0.3 Plain scalars, numpy arrays and a mix of these with xarray objects is also supported: >>> magnitude(3, 4) 5.0 >>> magnitude(3, np.array([0, 4])) array([3., 5.]) >>> magnitude(array, 0) array([1., 2., 3.]) Coordinates: * x (x) float64 0.1 0.2 0.3 Other examples of how you could use ``apply_ufunc`` to write functions to (very nearly) replicate existing xarray functionality: Compute the mean (``.mean``) over one dimension:: def mean(obj, dim): # note: apply always moves core dimensions to the end return apply_ufunc(np.mean, obj, input_core_dims=[[dim]], kwargs={'axis': -1}) Inner product over a specific dimension (like ``xr.dot``):: def _inner(x, y): result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis]) return result[..., 0, 0] def inner_product(a, b, dim): return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]]) Stack objects along a new dimension (like ``xr.concat``):: def stack(objects, dim, new_coord): # note: this version does not stack coordinates func = lambda *x: np.stack(x, axis=-1) result = apply_ufunc(func, *objects, output_core_dims=[[dim]], join='outer', dataset_fill_value=np.nan) result[dim] = new_coord return result If your function is not vectorized but can be applied only to core dimensions, you can use ``vectorize=True`` to turn into a vectorized function. This wraps :py:func:`numpy.vectorize`, so the operation isn't terribly fast. Here we'll use it to calculate the distance between empirical samples from two probability distributions, using a scipy function that needs to be applied to vectors:: import scipy.stats def earth_mover_distance(first_samples, second_samples, dim='ensemble'): return apply_ufunc(scipy.stats.wasserstein_distance, first_samples, second_samples, input_core_dims=[[dim], [dim]], vectorize=True) Most of NumPy's builtin functions already broadcast their inputs appropriately for use in `apply`. You may find helper functions such as numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also works well with numba's vectorize and guvectorize. Further explanation with examples are provided in the xarray documentation [3]_. See also -------- numpy.broadcast_arrays numba.vectorize numba.guvectorize References ---------- .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html .. 
[3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation """""" from .groupby import GroupBy from .dataarray import DataArray from .variable import Variable if input_core_dims is None: input_core_dims = ((),) * (len(args)) elif len(input_core_dims) != len(args): raise ValueError( ""input_core_dims must be None or a tuple with the length same to "" ""the number of arguments. Given input_core_dims: {}, "" ""number of args: {}."".format(input_core_dims, len(args)) ) if kwargs is None: kwargs = {} signature = _UFuncSignature(input_core_dims, output_core_dims) if exclude_dims and not exclude_dims <= signature.all_core_dims: raise ValueError( ""each dimension in `exclude_dims` must also be a "" ""core dimension in the function signature"" ) if kwargs: func = functools.partial(func, **kwargs) variables_vfunc = functools.partial( apply_variable_ufunc, func, signature=signature, exclude_dims=exclude_dims, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, output_dtypes=output_dtypes, output_sizes=output_sizes, ) # feed groupby-apply_ufunc through apply_groupby_func if any(isinstance(a, GroupBy) for a in args): this_apply = functools.partial( apply_ufunc, func, input_core_dims=input_core_dims, output_core_dims=output_core_dims, exclude_dims=exclude_dims, join=join, dataset_join=dataset_join, dataset_fill_value=dataset_fill_value, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, ) return apply_groupby_func(this_apply, *args) # feed datasets apply_variable_ufunc trough apply_dataset_vfunc elif any(is_dict_like(a) for a in args): return apply_dataset_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, dataset_join=dataset_join, fill_value=dataset_fill_value, keep_attrs=keep_attrs, ) # feed DataArray apply_variable_ufunc through apply_dataarray_vfunc elif any(isinstance(a, DataArray) for a in args): return apply_dataarray_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, keep_attrs=keep_attrs, ) # feed Variables directly through apply_variable_ufunc elif any(isinstance(a, Variable) for a in args): return variables_vfunc(*args) else: # feed anything else through apply_array_ufunc return apply_array_ufunc(func, *args, dask=dask) " 59942,"def _expected_lsf_directives(job): num_tasks = job.num_tasks or 1 num_tasks_per_node = job.num_tasks_per_node or 1 ptile = min( num_tasks * job.num_cpus_per_task, num_tasks_per_node * job.num_cpus_per_task) return set([ f'#BSUB -J testjob', f'#BSUB -o {job.stdout}', f'#BSUB -e {job.stderr}', f'#BSUB -n {num_tasks}', f'#BSUB -W {int(job.time_limit // 60)}', f'#BSUB -R ""span[ptile={ptile}]""', '#BSUB -x', f'#BSUB --account=spam', f'#BSUB --gres=gpu:4', f'#DW jobdw capacity=100GB', f'#DW stage_in source=/foo', ]) ","def _expected_lsf_directives(job): num_tasks = job.num_tasks or 1 num_tasks_per_node = job.num_tasks_per_node or 1 ptile = min( num_tasks * job.num_cpus_per_task, num_tasks_per_node * job.num_cpus_per_task ) return set([ f'#BSUB -J testjob', f'#BSUB -o {job.stdout}', f'#BSUB -e {job.stderr}', f'#BSUB -n {num_tasks}', f'#BSUB -W {int(job.time_limit // 60)}', f'#BSUB -R ""span[ptile={ptile}]""', '#BSUB -x', f'#BSUB --account=spam', f'#BSUB --gres=gpu:4', f'#DW jobdw capacity=100GB', f'#DW stage_in source=/foo', ]) " 6513,"def get_stock_availability(item_code, warehouse): stock_qty = frappe.utils.flt( frappe.db.get_value(""Bin"", { ""item_code"": item_code, ""warehouse"": warehouse }, ""actual_qty"") ) return True if stock_qty else False ","def 
get_stock_availability(item_code, warehouse): stock_qty = frappe.utils.flt( frappe.db.get_value(""Bin"", { ""item_code"": item_code, ""warehouse"": warehouse }, ""actual_qty"") ) return bool(stock_qty) " 277,"def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See `discard_tuned_samples`. step : function or iterable of functions A step function or collection of functions. If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, `start` should be a list of dicts of length = `chains`. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. 
For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. progressbar : bool Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and `effective_n`. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. 
Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() 
return trace ","def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See `discard_tuned_samples`. step : function or iterable of functions A step function or collection of functions. If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, `start` should be a list of dicts of length = ``chains``. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. 
When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. progressbar : bool Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and `effective_n`. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. 
Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() 
return trace " 29507,"def process_message_attachments( attachments: List[Dict[str, Any]], realm_id: int, message_id: int, user_id: int, user_handler: UserHandler, zerver_attachment: List[ZerverFieldsT], uploads_list: List[ZerverFieldsT], mattermost_data_dir: str, output_dir: str, ) -> Dict[str, Any]: has_image = False markdown_links = [] for attachment in attachments: attachment_path = attachment[""path""] attachment_full_path = os.path.join(mattermost_data_dir, ""data"", attachment_path) file_name = attachment_path.split(""/"")[-1] file_ext = file_name.split(""."")[-1] if file_ext.lower() in [""bmp"", ""gif"", ""jpg"", ""jpeg"", ""png"", ""webp""]: # The file extensions above are taken from `markdown.js` # variable `backend_only_markdown_re`. has_image = True s3_path = ""/"".join( [ str(realm_id), format(random.randint(0, 255), ""x""), secrets.token_urlsafe(18), sanitize_name(file_name), ] ) content_for_link = ""[{}](/user_uploads/{})"".format(file_name, s3_path) markdown_links.append(content_for_link) fileinfo = { ""name"": file_name, ""size"": os.path.getsize(attachment_full_path), ""created"": os.path.getmtime(attachment_full_path), } upload = dict( path=s3_path, realm_id=realm_id, content_type=None, user_profile_id=user_id, last_modified=fileinfo[""created""], user_profile_email=user_handler.get_user(user_id=user_id)[""email""], s3_path=s3_path, size=fileinfo[""size""], ) uploads_list.append(upload) build_attachment( realm_id=realm_id, message_ids={message_id}, user_id=user_id, fileinfo=fileinfo, s3_path=s3_path, zerver_attachment=zerver_attachment, ) # Copy the attachment file to output_dir attachment_out_path = os.path.join(output_dir, ""uploads"", s3_path) os.makedirs(os.path.dirname(attachment_out_path), exist_ok=True) shutil.copyfile(attachment_full_path, attachment_out_path) content = ""\n"".join(markdown_links) return dict( content=content, has_image=has_image, ) ","def process_message_attachments( attachments: List[Dict[str, Any]], realm_id: int, message_id: int, user_id: int, user_handler: UserHandler, zerver_attachment: List[ZerverFieldsT], uploads_list: List[ZerverFieldsT], mattermost_data_dir: str, output_dir: str, ) -> Dict[str, Any]: has_image = False markdown_links = [] for attachment in attachments: attachment_path = attachment[""path""] attachment_full_path = os.path.join(mattermost_data_dir, ""data"", attachment_path) file_name = attachment_path.split(""/"")[-1] file_ext = file_name.split(""."")[-1] if file_ext.lower() in [""bmp"", ""gif"", ""jpg"", ""jpeg"", ""png"", ""webp""]: # The file extensions above are taken from `markdown.js` # variable `backend_only_markdown_re`. 
has_image = True s3_path = ""/"".join( [ str(realm_id), format(random.randint(0, 255), ""x""), secrets.token_urlsafe(18), sanitize_name(file_name), ] ) content_for_link = f""[file_name](/user_uploads/{s3_path})"" markdown_links.append(content_for_link) fileinfo = { ""name"": file_name, ""size"": os.path.getsize(attachment_full_path), ""created"": os.path.getmtime(attachment_full_path), } upload = dict( path=s3_path, realm_id=realm_id, content_type=None, user_profile_id=user_id, last_modified=fileinfo[""created""], user_profile_email=user_handler.get_user(user_id=user_id)[""email""], s3_path=s3_path, size=fileinfo[""size""], ) uploads_list.append(upload) build_attachment( realm_id=realm_id, message_ids={message_id}, user_id=user_id, fileinfo=fileinfo, s3_path=s3_path, zerver_attachment=zerver_attachment, ) # Copy the attachment file to output_dir attachment_out_path = os.path.join(output_dir, ""uploads"", s3_path) os.makedirs(os.path.dirname(attachment_out_path), exist_ok=True) shutil.copyfile(attachment_full_path, attachment_out_path) content = ""\n"".join(markdown_links) return dict( content=content, has_image=has_image, ) " 34684,"def int_arg( request: Request, key: Text, default: Optional[int] = None ) -> Optional[int]: """"""Return a passed argument cast as an int or None. Checks the `key` parameter of the request if it contains a valid int value. If not, `default` is returned. """""" arg = request.args.get(key, default) if arg is default: return arg try: return int(str(arg)) except (ValueError, TypeError): logger.warning(f""Failed to convert '{arg}' to int."") return default ","def int_arg( request: Request, key: Text, default: Optional[int] = None ) -> Optional[int]: """"""Returns a passed argument cast as an int or None. Checks the `key` parameter of the request if it contains a valid int value. If not, `default` is returned. """""" arg = request.args.get(key, default) if arg is default: return arg try: return int(str(arg)) except (ValueError, TypeError): logger.warning(f""Failed to convert '{arg}' to int."") return default " 1636,"def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. 
`None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression() >>> clf.fit(X,y) LogisticRegression() >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=42) >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) ","def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. 
n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression() >>> clf.fit(X,y) LogisticRegression() >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=42) >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). 
random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) " 31573,"def search_logs_command(client, args): query = args.get('query') time_range = args.get('time_range') if args.get('time_range') else 'Last 5 minutes' limit = args.get('limit') if args.get('limit') else 100 repos = argToList(args.get('repos')) if args.get('repos') else [] if limit: try: limit = int(limit) except ValueError: raise DemistoException(f""The provided argument '{limit}' for limit is not a valid integer."") result = client.get_search_id(query, time_range, limit, repos) if not result.get('success'): raise DemistoException(result['message']) search_id = result.get('search_id') search_result = client.get_search_results(search_id) if not search_result.get('success'): raise DemistoException(search_result['message']) rows = search_result.get('rows', []) display_title = f""Found {len(rows)} logs"" markdown = tableToMarkdown(display_title, rows, headers=None) return CommandResults( readable_output=markdown, outputs_prefix='LogPoint.SearchLogs', outputs=rows ) ","def search_logs_command(client, args): query = args.get('query') time_range = args.get('time_range') if args.get('time_range') else 'Last 5 minutes' limit = args.get('limit') if args.get('limit') else 100 repos = argToList(args.get('repos')) if args.get('repos') else [] if limit: try: limit = int(limit) except ValueError: raise DemistoException(f""The provided argument '{limit}' for limit is not a valid integer."") result = client.get_search_id(query, time_range, limit, repos) if not result.get('success'): raise DemistoException(result.get('message')) search_id = result.get('search_id') search_result = client.get_search_results(search_id) if not search_result.get('success'): raise DemistoException(search_result['message']) rows = search_result.get('rows', []) display_title = f""Found {len(rows)} logs"" markdown = tableToMarkdown(display_title, rows, headers=None) return CommandResults( readable_output=markdown, outputs_prefix='LogPoint.SearchLogs', outputs=rows ) " 10118,"def list_to_pg_array(elem): """"""Convert the passed list to PostgreSQL array represented as a string. Args: elem (list): List that needs to be converted. Returns: elem (str): String representation of PostgreSQL array. """""" elem = str(elem).strip('[]') elem = '{' + elem + '}' return elem ","def list_to_pg_array(elem): """"""Convert the passed list to PostgreSQL array represented as a string. Args: elem (list): List that needs to be converted. Returns: elem (str): String representation of PostgreSQL array. """""" elem = str(elem)[1:-1] elem = '{' + elem + '}' return elem " 38219,"def evaluate_molecules_off(molecules, forcefield, minimize=False): """""" Given a list of molecules and a force field definition, calculate the positions and energy. 
Parameters ---------- molecules: List[openff.toolkit.topology.molecule.Molecule] A list of molecules with a 3D conformation forcefield: openff.toolkit.typing.engines.smirnoff.forcefield.ForceField The force field object to parameterize with minimize: bool Whether the structure should be minimized Returns ------- xyz: List[List[float]] The coordinates of all particles in the system (OpenMM ordering) ene: float The potential energy """""" from openff.toolkit.topology import Topology # get the oFF topology and the positions top = Topology.from_molecules(molecules) atom_xyz = to_openmm(coords_from_off_mols(molecules, unit=unit.nanometer)) # IMPORTANT! The new system has the virtual sites, but the oFF top does not as there is no API yet to do so sys = forcefield.create_openmm_system(top) n_vptl = sys.getNumParticles() - len(atom_xyz) n_mol = len(molecules) atom_xyz = insert_vsite_padding(atom_xyz, n_mol, n_vptl, order=""OpenFF"") # Need an openMM topology, an openMM system, and a set of positions # to calculate the energy. Also returns the vsite positions based # on supplied atom coordinates xyz, ene = openmm_evaluate_vsites_and_energy( top.to_openmm(), sys, atom_xyz, minimize=minimize ) return xyz, ene ","def evaluate_molecules_off(molecules, forcefield, minimize=False): """""" Given a list of molecules and a force field definition, calculate the positions and energy. Parameters ---------- molecules: List[openff.toolkit.topology.molecule.Molecule] A list of molecules with a 3D conformation forcefield: openff.toolkit.typing.engines.smirnoff.forcefield.ForceField The force field object to parameterize with minimize: bool Whether the structure should be minimized Returns ------- xyz: List[List[float]] The coordinates of all particles in the system (OpenMM ordering) ene: float The potential energy """""" from openff.toolkit.topology import Topology # get the oFF topology and the positions top = Topology.from_molecules(molecules) atom_xyz = to_openmm(coords_from_off_mols(molecules, unit=unit.nanometer)) # IMPORTANT! The new system has the virtual sites, but the oFF top does not as there is no API yet to do so sys = forcefield.create_openmm_system(top, allow_nonintegral_charges=True) n_vptl = sys.getNumParticles() - len(atom_xyz) n_mol = len(molecules) atom_xyz = insert_vsite_padding(atom_xyz, n_mol, n_vptl, order=""OpenFF"") # Need an openMM topology, an openMM system, and a set of positions # to calculate the energy. Also returns the vsite positions based # on supplied atom coordinates xyz, ene = openmm_evaluate_vsites_and_energy( top.to_openmm(), sys, atom_xyz, minimize=minimize ) return xyz, ene " 8458,"def fibonacciGenerator(n = None): """""" Generating function up to n fibonacci numbers iteratively Params: n: int Return: int """""" f0, f1 = 0, 1 yield f1 while n == None or n > 1: fn = f0 + f1 yield fn f0, f1 = f1, fn n -= 1 ","def fibonacci_generator(n = None): """""" Generating function up to n fibonacci numbers iteratively Params: n: int Return: int """""" f0, f1 = 0, 1 yield f1 while n == None or n > 1: fn = f0 + f1 yield fn f0, f1 = f1, fn n -= 1 " 30113,"def jaccard_to_distance( jaccard, ksize, scaled, n_unique_kmers=None, sequence_len_bp=None, return_identity=False, prob_threshold=10.0 ** (-3), err_threshold=10.0 ** (-4.0), ): """""" Given parameters, calculate point estimate for mutation rate from jaccard index. First checks if parameters are valid (checks are not exhaustive). Then uses formulas derived mathematically to compute the point estimate. 
The formula uses approximations, therefore a tiny error is associated with it. A lower bound of that error is also returned. A high error indicates that the point estimate cannot be trusted. Threshold of the error is open to interpretation, but suggested that > 10^-4 should be handled with caution. Note that the error is NOT a mutation rate, and therefore cannot be considered in something like mut.rate +/- error. Arguments: jaccard, ksize, scaled, n_unique_kmers # Returns: tuple (point_estimate_of_mutation_rate, lower_bound_of_error) # Returns: point_estimate_of_mutation_rate Note: point estimate does not consider impact of scaled, but p_nothing_in_common can be useful for determining whether scaled is sufficient for these comparisons. """""" error_lower_bound = None if sequence_len_bp and not n_unique_kmers: n_unique_kmers = sequence_len_to_n_kmers(sequence_len_bp, ksize) if jaccard <= 0.0001: point_estimate = 1.0 error_lower_bound = 0.0 elif jaccard >= 0.9999: point_estimate = 0.0 error_lower_bound = 0.0 else: point_estimate = 1.0 - (2.0 * jaccard / float(1 + jaccard)) ** ( 1.0 / float(ksize) ) exp_n_mut = exp_n_mutated(n_unique_kmers, ksize, point_estimate) var_n_mut = var_n_mutated(n_unique_kmers, ksize, point_estimate) error_lower_bound = ( 1.0 * n_unique_kmers * var_n_mut / (n_unique_kmers + exp_n_mut) ** 3 ) if error_lower_bound is not None and error_lower_bound > err_threshold: notify( f""WARNING: Error on Jaccard distance point estimate is too high ({error_lower_bound})."" ) prob_nothing_in_common = get_exp_probability_nothing_common( point_estimate, ksize, scaled, n_unique_kmers=n_unique_kmers ) if prob_nothing_in_common >= prob_threshold: # to do: keep count and recommend user lower scaled val notify( ""WARNING: These sketches may have no hashes in common based on chance alone."" ) if return_identity: point_estimate = distance_to_identity(point_estimate) return point_estimate, prob_nothing_in_common, error_lower_bound ","def jaccard_to_distance( jaccard, ksize, scaled, n_unique_kmers=None, sequence_len_bp=None, return_identity=False, prob_threshold=1e-3, err_threshold=10.0 ** (-4.0), ): """""" Given parameters, calculate point estimate for mutation rate from jaccard index. First checks if parameters are valid (checks are not exhaustive). Then uses formulas derived mathematically to compute the point estimate. The formula uses approximations, therefore a tiny error is associated with it. A lower bound of that error is also returned. A high error indicates that the point estimate cannot be trusted. Threshold of the error is open to interpretation, but suggested that > 10^-4 should be handled with caution. Note that the error is NOT a mutation rate, and therefore cannot be considered in something like mut.rate +/- error. Arguments: jaccard, ksize, scaled, n_unique_kmers # Returns: tuple (point_estimate_of_mutation_rate, lower_bound_of_error) # Returns: point_estimate_of_mutation_rate Note: point estimate does not consider impact of scaled, but p_nothing_in_common can be useful for determining whether scaled is sufficient for these comparisons. 
"""""" error_lower_bound = None if sequence_len_bp and not n_unique_kmers: n_unique_kmers = sequence_len_to_n_kmers(sequence_len_bp, ksize) if jaccard <= 0.0001: point_estimate = 1.0 error_lower_bound = 0.0 elif jaccard >= 0.9999: point_estimate = 0.0 error_lower_bound = 0.0 else: point_estimate = 1.0 - (2.0 * jaccard / float(1 + jaccard)) ** ( 1.0 / float(ksize) ) exp_n_mut = exp_n_mutated(n_unique_kmers, ksize, point_estimate) var_n_mut = var_n_mutated(n_unique_kmers, ksize, point_estimate) error_lower_bound = ( 1.0 * n_unique_kmers * var_n_mut / (n_unique_kmers + exp_n_mut) ** 3 ) if error_lower_bound is not None and error_lower_bound > err_threshold: notify( f""WARNING: Error on Jaccard distance point estimate is too high ({error_lower_bound})."" ) prob_nothing_in_common = get_exp_probability_nothing_common( point_estimate, ksize, scaled, n_unique_kmers=n_unique_kmers ) if prob_nothing_in_common >= prob_threshold: # to do: keep count and recommend user lower scaled val notify( ""WARNING: These sketches may have no hashes in common based on chance alone."" ) if return_identity: point_estimate = distance_to_identity(point_estimate) return point_estimate, prob_nothing_in_common, error_lower_bound " 31341,"def list_incident_events_command(client: Client, args: dict): """""" Get a specific incident's events by incident ID. Args: client (Client): Qualys FIM API client. args (dict): All command arguments. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """""" params = remove_empty_elements({'filter': args.get('filter', None), 'pageNumber': args.get('page_number', None), 'pageSize': args.get('limit', None), 'attributes': args.get('attributes', None)}) raw_response = client.get_incident_events(args.get('incident_id', None), params) table_headers = ['id', 'name', 'severity', 'action', 'type', 'dateTime'] outputs = [] raw_outputs = [] if raw_response: for item in raw_response: data = item.get('data', None) if data: raw_outputs.append(data) item_dictionary = create_event_or_incident_output(data, table_headers) date_time = item_dictionary.get('dateTime', None) if date_time: date_time = datetime.strptime(date_time, DATETIME_FORMAT).strftime(TABLE_DATETIME_FORMAT) item_dictionary['dateTime'] = date_time outputs.append(item_dictionary) readable_output = tableToMarkdown(name=f'Listed {len(outputs)} Events From Incident:', t=outputs, headers=table_headers, removeNull=True) return CommandResults(outputs_prefix='QualysFIM.Event', outputs_key_field='id', raw_response=raw_response, outputs=raw_outputs, readable_output=readable_output) ","def list_incident_events_command(client: Client, args: dict): """""" Get a specific incident's events by incident ID. Args: client (Client): Qualys FIM API client. args (dict): All command arguments. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. 
"""""" params = remove_empty_elements({'filter': args.get('filter', None), 'pageNumber': args.get('page_number', None), 'pageSize': args.get('limit', None), 'attributes': args.get('attributes', None)}) raw_response = client.get_incident_events(args['incident_id'], params) table_headers = ['id', 'name', 'severity', 'action', 'type', 'dateTime'] outputs = [] raw_outputs = [] if raw_response: for item in raw_response: data = item.get('data', None) if data: raw_outputs.append(data) item_dictionary = create_event_or_incident_output(data, table_headers) date_time = item_dictionary.get('dateTime', None) if date_time: date_time = datetime.strptime(date_time, DATETIME_FORMAT).strftime(TABLE_DATETIME_FORMAT) item_dictionary['dateTime'] = date_time outputs.append(item_dictionary) readable_output = tableToMarkdown(name=f'Listed {len(outputs)} Events From Incident:', t=outputs, headers=table_headers, removeNull=True) return CommandResults(outputs_prefix='QualysFIM.Event', outputs_key_field='id', raw_response=raw_response, outputs=raw_outputs, readable_output=readable_output) " 3285,"def load_data( platform, default=None, sample_name=None, timestamp=None, start_timestamp=None, trace=None, span=None, ): # NOTE: Before editing this data, make sure you understand the context # in which its being used. It is NOT only used for local development and # has production consequences. # * bin/load-mocks to generate fake data for local testing # * When a new project is created, a fake event is generated as a ""starter"" # event so it's not an empty project. # * When a user clicks Test Configuration from notification plugin settings page, # a fake event is generated to go through the pipeline. data = None language = None platform_data = INTEGRATION_ID_TO_PLATFORM_DATA.get(platform) if platform_data is not None and platform_data[""type""] != ""language"": language = platform_data[""language""] samples_root = os.path.join(DATA_ROOT, ""samples"") all_samples = set(f for f in os.listdir(samples_root) if f.endswith("".json"")) for platform in (platform, language, default): if not platform: continue # Verify by checking if the file is within our folder explicitly # avoids being able to have a name that invokes traversing directories. json_path = u""{}.json"".format(platform) if json_path not in all_samples: continue if not sample_name: try: sample_name = INTEGRATION_ID_TO_PLATFORM_DATA[platform][""name""] except KeyError: pass # XXX: At this point, it's assumed that `json_path` was safely found # within `samples_root` due to the check above and cannot traverse # into paths. with open(os.path.join(samples_root, json_path)) as fp: data = json.load(fp) break if data is None: return data = CanonicalKeyDict(data) if platform in (""csp"", ""hkpk"", ""expectct"", ""expectstaple""): return data # Generate a timestamp in the present. 
if timestamp is None: timestamp = timezone.now() else: timestamp = timestamp.replace(tzinfo=pytz.utc) data.setdefault(""timestamp"", to_timestamp(timestamp)) if data.get(""type"") == ""transaction"": if start_timestamp is None: start_timestamp = timestamp - timedelta(seconds=3) else: start_timestamp = start_timestamp.replace(tzinfo=pytz.utc) data[""start_timestamp""] = to_timestamp(start_timestamp) if trace is None: trace = uuid4().hex if span is None: span = uuid4().hex[:16] for tag in data[""tags""]: if tag[0] == ""trace"": tag[1] = trace elif tag[0] == ""trace.ctx"": tag[1] = trace + ""-"" + span elif tag[0] == ""trace.span"": tag[1] = span data[""contexts""][""trace""][""trace_id""] = trace data[""contexts""][""trace""][""span_id""] = span for span in data.get(""spans"", []): # Use data to generate span timestamps consistently and based # on event timestamp duration = span.get(""data"", {}).get(""duration"", 10.0) offset = span.get(""data"", {}).get(""offset"", 0) span_start = data[""start_timestamp""] + offset span[""trace_id""] = trace span.setdefault(""start_timestamp"", span_start) span.setdefault(""timestamp"", span_start + duration) measurements = data.get(""measurements"") if measurements: measurement_markers = {} for key, entry in measurements.items(): if key in [""fp"", ""fcp"", ""lcp"", ""fid""]: measurement_markers[""mark.{}"".format(key)] = { ""value"": round(data[""start_timestamp""] + entry[""value""] / 1000) } measurements.update(measurement_markers) data[""platform""] = platform # XXX: Message is a legacy alias for logentry. Do not overwrite if set. if ""message"" not in data: data[""message""] = ""This is an example %s exception"" % (sample_name or platform,) data.setdefault( ""user"", generate_user(ip_address=""127.0.0.1"", username=""sentry"", id=1, email=""sentry@example.com""), ) data.setdefault( ""extra"", { ""session"": {""foo"": ""bar""}, ""results"": [1, 2, 3, 4, 5], ""emptyList"": [], ""emptyMap"": {}, ""length"": 10837790, ""unauthorized"": False, ""url"": ""http://example.org/foo/bar/"", }, ) data.setdefault(""modules"", {""my.package"": ""1.0.0""}) data.setdefault( ""request"", { ""cookies"": ""foo=bar;biz=baz"", ""url"": ""http://example.com/foo"", ""headers"": { ""Referer"": ""http://example.com"", ""Content-Type"": ""application/json"", ""User-Agent"": ""Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36"", }, ""env"": {""ENV"": ""prod""}, ""query_string"": ""foo=bar"", ""data"": '{""hello"": ""world""}', ""method"": ""GET"", }, ) return data ","def load_data( platform, default=None, sample_name=None, timestamp=None, start_timestamp=None, trace=None, span=None, ): # NOTE: Before editing this data, make sure you understand the context # in which its being used. It is NOT only used for local development and # has production consequences. # * bin/load-mocks to generate fake data for local testing # * When a new project is created, a fake event is generated as a ""starter"" # event so it's not an empty project. # * When a user clicks Test Configuration from notification plugin settings page, # a fake event is generated to go through the pipeline. 
data = None language = None platform_data = INTEGRATION_ID_TO_PLATFORM_DATA.get(platform) if platform_data is not None and platform_data[""type""] != ""language"": language = platform_data[""language""] samples_root = os.path.join(DATA_ROOT, ""samples"") all_samples = set(f for f in os.listdir(samples_root) if f.endswith("".json"")) for platform in (platform, language, default): if not platform: continue # Verify by checking if the file is within our folder explicitly # avoids being able to have a name that invokes traversing directories. json_path = u""{}.json"".format(platform) if json_path not in all_samples: continue if not sample_name: try: sample_name = INTEGRATION_ID_TO_PLATFORM_DATA[platform][""name""] except KeyError: pass # XXX: At this point, it's assumed that `json_path` was safely found # within `samples_root` due to the check above and cannot traverse # into paths. with open(os.path.join(samples_root, json_path)) as fp: data = json.load(fp) break if data is None: return data = CanonicalKeyDict(data) if platform in (""csp"", ""hkpk"", ""expectct"", ""expectstaple""): return data # Generate a timestamp in the present. if timestamp is None: timestamp = timezone.now() else: timestamp = timestamp.replace(tzinfo=pytz.utc) data.setdefault(""timestamp"", to_timestamp(timestamp)) if data.get(""type"") == ""transaction"": if start_timestamp is None: start_timestamp = timestamp - timedelta(seconds=3) else: start_timestamp = start_timestamp.replace(tzinfo=pytz.utc) data[""start_timestamp""] = to_timestamp(start_timestamp) if trace is None: trace = uuid4().hex if span is None: span = uuid4().hex[:16] for tag in data[""tags""]: if tag[0] == ""trace"": tag[1] = trace elif tag[0] == ""trace.ctx"": tag[1] = trace + ""-"" + span elif tag[0] == ""trace.span"": tag[1] = span data[""contexts""][""trace""][""trace_id""] = trace data[""contexts""][""trace""][""span_id""] = span for span in data.get(""spans"", []): # Use data to generate span timestamps consistently and based # on event timestamp duration = span.get(""data"", {}).get(""duration"", 10.0) offset = span.get(""data"", {}).get(""offset"", 0) span_start = data[""start_timestamp""] + offset span[""trace_id""] = trace span.setdefault(""start_timestamp"", span_start) span.setdefault(""timestamp"", span_start + duration) measurements = data.get(""measurements"") if measurements: measurement_markers = {} for key, entry in measurements.items(): if key in [""fp"", ""fcp"", ""lcp"", ""fid""]: measurement_markers[""mark.{}"".format(key)] = { ""value"": data[""start_timestamp""] + entry[""value""] // 1000 } measurements.update(measurement_markers) data[""platform""] = platform # XXX: Message is a legacy alias for logentry. Do not overwrite if set. 
if ""message"" not in data: data[""message""] = ""This is an example %s exception"" % (sample_name or platform,) data.setdefault( ""user"", generate_user(ip_address=""127.0.0.1"", username=""sentry"", id=1, email=""sentry@example.com""), ) data.setdefault( ""extra"", { ""session"": {""foo"": ""bar""}, ""results"": [1, 2, 3, 4, 5], ""emptyList"": [], ""emptyMap"": {}, ""length"": 10837790, ""unauthorized"": False, ""url"": ""http://example.org/foo/bar/"", }, ) data.setdefault(""modules"", {""my.package"": ""1.0.0""}) data.setdefault( ""request"", { ""cookies"": ""foo=bar;biz=baz"", ""url"": ""http://example.com/foo"", ""headers"": { ""Referer"": ""http://example.com"", ""Content-Type"": ""application/json"", ""User-Agent"": ""Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36"", }, ""env"": {""ENV"": ""prod""}, ""query_string"": ""foo=bar"", ""data"": '{""hello"": ""world""}', ""method"": ""GET"", }, ) return data " 5397,"def _ip_route_linux(): """""" Return ip routing information for Linux distros (netstat is deprecated and may not be available) """""" # table main closest to old netstat inet output ret = [] cmd = ""ip -4 route show table main"" out = __salt__[""cmd.run""](cmd, python_shell=True) ip_route_linux_pattern = ( r""^(?Punreachable)?\s*(?P"" r""default|\S+)\s*(via)?\s*(?P\S+)?\s+"" r""(dev)?\s+(?P\S+).*"" ) p = re.compile(ip_route_linux_pattern) for line in out.splitlines(): line = p.search(line) if line is None: continue # need to fake similar output to that provided by netstat # to maintain output format if line.group(""unreachable"") == ""unreachable"": continue if line.group(""network"") == ""default"": ip_interface = line.group(""interface"") ret.append( { ""addr_family"": ""inet"", ""destination"": ""0.0.0.0"", ""gateway"": line.group(""gateway""), ""netmask"": ""0.0.0.0"", ""flags"": ""UG"", ""interface"": ip_interface if ip_interface else """", } ) else: address_mask = convert_cidr(line.group(""network"")) ip_interface = line.group(""interface"") ip_gateway = line.group(""gateway"") if line.group(""gateway""): flags = ""UG"" else: flags = ""U"" ret.append( { ""addr_family"": ""inet"", ""destination"": address_mask[""network""], ""gateway"": ip_gateway if ip_gateway else ""0.0.0.0"", ""netmask"": address_mask[""netmask""], ""flags"": flags, ""interface"": ip_interface if ip_interface else """", } ) # table all closest to old netstat inet6 output cmd = ""ip -6 route show table all"" out = __salt__[""cmd.run""](cmd, python_shell=True) for line in out.splitlines(): line = p.search(line) if line is None: continue # need to fake similar output to that provided by netstat # to maintain output format if line.group(""unreachable"") == ""unreachable"": continue if line.group(""network"") == ""default"": ip_interface = line.group(""interface"") ip_gateway = line.group(""gateway"") ret.append( { ""addr_family"": ""inet6"", ""destination"": ""::/0"", ""gateway"": ip_gateway, ""netmask"": """", ""flags"": ""UG"", ""interface"": ip_interface if ip_interface else """", } ) elif line.group(""network"") == ""local"": ip_interface = line.group(""interface"") local_address = line.group(""gateway"") + ""/128"" ret.append( { ""addr_family"": ""inet6"", ""destination"": local_address, ""gateway"": ""::"", ""netmask"": """", ""flags"": ""U"", ""interface"": ip_interface if ip_interface else """", } ) else: ip_interface = line.group(""interface"") ip_gateway = line.group(""gateway"") if line.group(""gateway"") is None: flags = ""U"" else: 
flags = ""UG"" ret.append( { ""addr_family"": ""inet6"", ""destination"": line.group(""network""), ""gateway"": ip_gateway if ip_gateway else ""::"", ""netmask"": """", ""flags"": flags, ""interface"": ip_interface if ip_interface else """", } ) return ret ","def _ip_route_linux(): """""" Return ip routing information for Linux distros (netstat is deprecated and may not be available) """""" # table main closest to old netstat inet output ret = [] cmd = ""ip -4 route show table main"" out = __salt__[""cmd.run""](cmd, python_shell=True) ip_route_linux_pattern = ( r""^(?Punreachable)?\s*(?P"" r""default|\S+)\s*(via)?\s*(?P\S+)?\s+"" r""(dev)?\s+(?P\S+).*"" ) p = re.compile(ip_route_linux_pattern) for line in out.splitlines(): line = p.search(line) if line is None: continue # need to fake similar output to that provided by netstat # to maintain output format if line.group(""unreachable"") == ""unreachable"": continue if line.group(""network"") == ""default"": ip_interface = line.group(""interface"") ret.append( { ""addr_family"": ""inet"", ""destination"": ""0.0.0.0"", ""gateway"": line.group(""gateway""), ""netmask"": ""0.0.0.0"", ""flags"": ""UG"", ""interface"": ip_interface if ip_interface else """", } ) else: address_mask = convert_cidr(line.group(""network"")) ip_interface = line.group(""interface"") ip_gateway = line.group(""gateway"") if line.group(""gateway""): flags = ""UG"" else: flags = ""U"" ret.append( { ""addr_family"": ""inet"", ""destination"": address_mask[""network""], ""gateway"": ip_gateway if ip_gateway else ""0.0.0.0"", ""netmask"": address_mask[""netmask""], ""flags"": flags, ""interface"": ip_interface if ip_interface else """", } ) # table all closest to old netstat inet6 output cmd = ""ip -6 route show table all"" out = __salt__[""cmd.run""](cmd, python_shell=True) for line in out.splitlines(): line = p.search(line) if line is None: continue # need to fake similar output to that provided by netstat # to maintain output format if line.group(""unreachable"") == ""unreachable"": continue if line.group(""network"") == ""default"": ip_interface = line.group(""interface"") ip_gateway = line.group(""gateway"") ret.append( { ""addr_family"": ""inet6"", ""destination"": ""::/0"", ""gateway"": ip_gateway, ""netmask"": """", ""flags"": ""UG"", ""interface"": ip_interface if ip_interface else """", } ) elif line.group(""network"") == ""local"": ip_interface = line.group(""interface"") local_address = line.group(""gateway"") + ""/128"" ret.append( { ""addr_family"": ""inet6"", ""destination"": local_address, ""gateway"": ""::"", ""netmask"": """", ""flags"": ""U"", ""interface"": ip_interface if ip_interface else """", } ) else: ip_interface = line.group(""interface"") ip_gateway = line.group(""gateway"") if ip_gateway is None: flags = ""U"" else: flags = ""UG"" ret.append( { ""addr_family"": ""inet6"", ""destination"": line.group(""network""), ""gateway"": ip_gateway if ip_gateway else ""::"", ""netmask"": """", ""flags"": flags, ""interface"": ip_interface if ip_interface else """", } ) return ret " 53638,"def _looks_like_subscriptable(node: ClassDef) -> bool: """""" Returns True if the node corresponds to a ClassDef of the Collections.abc module that. supports subscripting. 
:param node: ClassDef node """""" if node.qname().startswith(""_collections"") or node.qname().startswith( ""collections"" ): try: node.getattr(""__class_getitem__"") return True except AttributeInferenceError: pass return False ","def _looks_like_subscriptable(node: ClassDef) -> bool: """""" Returns True if the node corresponds to a ClassDef of the Collections.abc module that supports subscripting. :param node: ClassDef node """""" if node.qname().startswith(""_collections"") or node.qname().startswith( ""collections"" ): try: node.getattr(""__class_getitem__"") return True except AttributeInferenceError: pass return False " 27260,"def schema( pairs: Iterable[tuple[str, dt.DataType]] | dict[str, dt.DataType] | None = None, names: Iterable[str] | None = None, types: Iterable[str | dt.DataType] | None = None, ) -> sch.Schema: """"""Validate and return an :class:`~ibis.expr.schema.Schema` object. Parameters ---------- pairs List or dictionary of name, type pairs. Mutually exclusive with `names` and `types`. names Field names. Mutually exclusive with `pairs`. types Field types. Mutually exclusive with `pairs`. Examples -------- >>> from ibis import schema >>> sc = schema([('foo', 'string'), ... ('bar', 'int64'), ... ('baz', 'boolean')]) >>> sc2 = schema(names=['foo', 'bar', 'baz'], ... types=['string', 'int64', 'boolean']) """""" if pairs is not None: if isinstance(pairs, dict): return Schema.from_dict(pairs) return Schema.from_tuples(pairs) else: return Schema(names, types) ","def schema( pairs: Iterable[tuple[str, dt.DataType]] | dict[str, dt.DataType] | None = None, names: Iterable[str] | None = None, types: Iterable[str | dt.DataType] | None = None, ) -> sch.Schema: """"""Validate and return an :class:`~ibis.expr.schema.Schema` object. Parameters ---------- pairs List or dictionary of name, type pairs. Mutually exclusive with `names` and `types`. names Field names. Mutually exclusive with `pairs`. types Field types. Mutually exclusive with `pairs`. Examples -------- >>> from ibis import schema >>> sc = schema([('foo', 'string'), ... ('bar', 'int64'), ... ('baz', 'boolean')]) >>> sc2 = schema(names=['foo', 'bar', 'baz'], ... types=['string', 'int64', 'boolean']) """""" if pairs is not None: return Schema.from_dict(dict(pairs)) else: return Schema(names, types) " 41300,"def maketodo( pipfile, dbfile, intre, exclude_re=None, balance_wire_re=None, balance_wire_direction=None, balance_wire_cnt=None, not_endswith=None, verbose=False): ''' db files start with INT., but pipfile lines start with INT_L Normalize by removing before the first dot 050-intpips doesn't care about contents, but most fuzzers use the tile type prefix ''' todos, tile_type = load_pipfile(pipfile, verbose=verbose) verbose and print( '%s: %u entries' % (pipfile, len(todos)), file=sys.stderr) if not todos: verbose and print( '%s: %u entries, done!' % (pipfile, len(todos)), file=sys.stderr) return verbose and print( ""pipfile todo sample: %s"" % list(todos)[0], file=sys.stderr) if 0 and verbose: print(""TODOs"", file=sys.stderr) for todo in sorted(list(todos)): print(' %s' % todo) verbose and print( 'Pre db %s: %u entries' % (dbfile, len(todos)), file=sys.stderr) # Allow against empty db if os.path.exists(dbfile): verbose and print(""Loading %s"" % dbfile, file=sys.stderr) with open(dbfile, ""r"") as f: # INT.BYP_ALT0.BYP_BOUNCE_N3_3 !22_07 !23_07 !25_07 21_07 24_07 for line in f: tag, _bits, mode, _ = util.parse_db_line(line.strip()) # Only count resolved entries if mode: continue # INT.BLAH => INT_L.BLAH tag = tile_type + '.' 
+ noprefix(tag) # bipips works on a subset if tag in todos: todos.remove(tag) else: verbose and print( ""WARNING: couldnt remove %s (line %s)"" % (tag, line.strip()), file=sys.stderr) else: verbose and print( ""WARNING: dbfile doesnt exist: %s"" % dbfile, file=sys.stderr) verbose and print( 'Post db %s: %u entries' % (dbfile, len(todos)), file=sys.stderr) drops = 0 lines = 0 filtered_todos = set() for line in todos: include = re.match(intre, line) is not None if include and not_endswith is not None: include = not line.endswith(not_endswith) if include and exclude_re is not None: include = re.match(exclude_re, line) is None if include: filtered_todos.add(line) else: drops += 1 lines += 1 verbose and print( 'Print %u entries w/ %u drops' % (lines, drops), file=sys.stderr) balance_todo_list( pipfile, filtered_todos, balance_wire_re, balance_wire_direction, balance_wire_cnt, verbose) for todo in filtered_todos: print(todo) ","def maketodo( pipfile, dbfile, intre, exclude_re=None, balance_wire_re=None, balance_wire_direction=None, balance_wire_cnt=None, not_endswith=None, verbose=False): ''' db files start with INT., but pipfile lines start with INT_L Normalize by removing before the first dot 050-intpips doesn't care about contents, but most fuzzers use the tile type prefix ''' todos, tile_type = load_pipfile(pipfile, verbose=verbose) verbose and print( '%s: %u entries' % (pipfile, len(todos)), file=sys.stderr) if not todos: verbose and print( '%s: %u entries, done!' % (pipfile, len(todos)), file=sys.stderr) return verbose and print( ""pipfile todo sample: %s"" % list(todos)[0], file=sys.stderr) if verbose: print(""TODOs"", file=sys.stderr) for todo in sorted(list(todos)): print(' %s' % todo) verbose and print( 'Pre db %s: %u entries' % (dbfile, len(todos)), file=sys.stderr) # Allow against empty db if os.path.exists(dbfile): verbose and print(""Loading %s"" % dbfile, file=sys.stderr) with open(dbfile, ""r"") as f: # INT.BYP_ALT0.BYP_BOUNCE_N3_3 !22_07 !23_07 !25_07 21_07 24_07 for line in f: tag, _bits, mode, _ = util.parse_db_line(line.strip()) # Only count resolved entries if mode: continue # INT.BLAH => INT_L.BLAH tag = tile_type + '.' + noprefix(tag) # bipips works on a subset if tag in todos: todos.remove(tag) else: verbose and print( ""WARNING: couldnt remove %s (line %s)"" % (tag, line.strip()), file=sys.stderr) else: verbose and print( ""WARNING: dbfile doesnt exist: %s"" % dbfile, file=sys.stderr) verbose and print( 'Post db %s: %u entries' % (dbfile, len(todos)), file=sys.stderr) drops = 0 lines = 0 filtered_todos = set() for line in todos: include = re.match(intre, line) is not None if include and not_endswith is not None: include = not line.endswith(not_endswith) if include and exclude_re is not None: include = re.match(exclude_re, line) is None if include: filtered_todos.add(line) else: drops += 1 lines += 1 verbose and print( 'Print %u entries w/ %u drops' % (lines, drops), file=sys.stderr) balance_todo_list( pipfile, filtered_todos, balance_wire_re, balance_wire_direction, balance_wire_cnt, verbose) for todo in filtered_todos: print(todo) " 20168,"def avoid_mailto_callback(attrs, new=False): """""" Remove completely the link containing a `mailto` to avoid spam. If case of a bad markdown formating for links, the href will not be found in attr and a KeyError will be raised. We chose to catch the exception and just display the text of the link alone. 
"""""" try: if attrs[(None, 'href')].startswith('mailto:'): return None except KeyError: pass return attrs ","def avoid_mailto_callback(attrs, new=False): """""" Remove completely the link containing a `mailto` to avoid spam. In case of a bad markdown formatting for links, the href will not be found in attr and a KeyError will be raised. We chose to catch the exception and just display the text of the link alone. """""" try: if attrs[(None, 'href')].startswith('mailto:'): return None except KeyError: pass return attrs " 42997,"def update_from_environment_variables(config): """"""Updates the current configuration object from data stored in environment variables. .. note:: Currently the following environment variables are checked: * SF_API_AUTHENTICATION_TOKEN * SF_API_HOSTNAME * SF_API_USE_SSL * SF_API_DEBUG * SF_API_PORT Args: config (dict[str, dict[str, Union[str, bool, int]]]): the configuration to be updated Returns: dict[str, dict[str, Union[str, bool, int]]]): the updated configuration """""" for section, sectionconfig in config.items(): env_prefix = ""SF_{}_"".format(section.upper()) for key in sectionconfig: env = env_prefix + key.upper() if env in os.environ: config[section][key] = parse_environment_variable(key, os.environ[env]) ","def update_from_environment_variables(config): """"""Updates the current configuration object from data stored in environment variables. .. note:: The following environment variables are checked: * SF_API_AUTHENTICATION_TOKEN * SF_API_HOSTNAME * SF_API_USE_SSL * SF_API_DEBUG * SF_API_PORT Args: config (dict[str, dict[str, Union[str, bool, int]]]): the configuration to be updated Returns: dict[str, dict[str, Union[str, bool, int]]]): the updated configuration """""" for section, sectionconfig in config.items(): env_prefix = ""SF_{}_"".format(section.upper()) for key in sectionconfig: env = env_prefix + key.upper() if env in os.environ: config[section][key] = parse_environment_variable(key, os.environ[env]) " 30641,"def parse_outputs( api_res: Dict, meta_fields: list = [], quota_fields: list = [], resources_fields: list = [], sandbox_filds: list = [] ) -> Dict[str, dict]: """"""Parse group data as received from CrowdStrike FalconX API into Demisto's conventions the output from the API is a dict that contains the keys: meta, resources and errors the meta contains a ""quota"" dict the ""resources"" is an array that contains the sandbox dict the function filters the wanted params from the api result :param api_res: the api result from the http request :param meta_fields: the wanted params that appear in the mate section :param quota_fields: the wanted params that appear in the quota section :param resources_fields: the wanted params that appear in the resources section :param sandbox_filds: the wanted params that appear in the sandbox section :return: a dict based on api_res with the wanted params only """""" if api_res.get(""errors""): # if there is an error in the api result, return only the error return api_res.get(""errors"") api_res_meta, api_res_quota, api_res_resources, api_res_sandbox = {}, {}, {}, {} resources_group_outputs, sandbox_group_outputs = {}, {} api_res_meta = api_res.get(""meta"") if api_res_meta: api_res_quota = api_res_meta.get(""quota"") meta_group_outputs = add_outputs_from_dict(api_res_meta, meta_fields) quota_group_outputs = add_outputs_from_dict(api_res_quota, quota_fields) if api_res.get(""resources""): # depended on the command, the resources section can be a str list or a list that contains # only one argument which is a dict if 
type(api_res.get(""resources"")[0]) == dict: api_res_resources = api_res.get(""resources"")[0] resources_group_outputs = add_outputs_from_dict(api_res_resources, resources_fields) if api_res_resources and api_res_resources.get(""sandbox""): api_res_sandbox = api_res_resources.get(""sandbox"")[0] sandbox_group_outputs = add_outputs_from_dict(api_res_sandbox, sandbox_filds) else: # the resources section is a list of strings resources_group_outputs = {""resources"": api_res.get(""resources"")} merged_dicts = {**meta_group_outputs, **quota_group_outputs, **resources_group_outputs, **sandbox_group_outputs} return {f'csfalconx.resource(val.resource === obj.resource)': merged_dicts} ","def parse_outputs( api_res: Dict, meta_fields: list = [], quota_fields: list = [], resources_fields: list = [], sandbox_fileds: list = [] ) -> Dict[str, dict]: """"""Parse group data as received from CrowdStrike FalconX API into Demisto's conventions the output from the API is a dict that contains the keys: meta, resources and errors the meta contains a ""quota"" dict the ""resources"" is an array that contains the sandbox dict the function filters the wanted params from the api result :param api_res: the api result from the http request :param meta_fields: the wanted params that appear in the mate section :param quota_fields: the wanted params that appear in the quota section :param resources_fields: the wanted params that appear in the resources section :param sandbox_filds: the wanted params that appear in the sandbox section :return: a dict based on api_res with the wanted params only """""" if api_res.get(""errors""): # if there is an error in the api result, return only the error return api_res.get(""errors"") api_res_meta, api_res_quota, api_res_resources, api_res_sandbox = {}, {}, {}, {} resources_group_outputs, sandbox_group_outputs = {}, {} api_res_meta = api_res.get(""meta"") if api_res_meta: api_res_quota = api_res_meta.get(""quota"") meta_group_outputs = add_outputs_from_dict(api_res_meta, meta_fields) quota_group_outputs = add_outputs_from_dict(api_res_quota, quota_fields) if api_res.get(""resources""): # depended on the command, the resources section can be a str list or a list that contains # only one argument which is a dict if type(api_res.get(""resources"")[0]) == dict: api_res_resources = api_res.get(""resources"")[0] resources_group_outputs = add_outputs_from_dict(api_res_resources, resources_fields) if api_res_resources and api_res_resources.get(""sandbox""): api_res_sandbox = api_res_resources.get(""sandbox"")[0] sandbox_group_outputs = add_outputs_from_dict(api_res_sandbox, sandbox_filds) else: # the resources section is a list of strings resources_group_outputs = {""resources"": api_res.get(""resources"")} merged_dicts = {**meta_group_outputs, **quota_group_outputs, **resources_group_outputs, **sandbox_group_outputs} return {f'csfalconx.resource(val.resource === obj.resource)': merged_dicts} " 42939,"def rbfkernel(R, sigma): r""""""This function generates a radial basis function (RBF) kernel matrix. The elements of the RBF kernel are computed as: .. math:: K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)}, where :math:`\bf{r}_i` is the coordinates of point :math:`i` and :math:`\sigma` is a constant. **Example usage:** >>> R = array([[0, 1], [1, 0], [0, 0], [1, 1]]) >>> sigma = 1.0 >>> rbfkernel (R, sigma) array([[1. , 0.36787944, 0.60653066, 0.60653066], [0.36787944, 1. , 0.60653066, 0.60653066], [0.60653066, 0.60653066, 1. , 0.36787944], [0.60653066, 0.60653066, 0.36787944, 1. 
]]) Args: R (array): coordinates of the points. sigma (float): a constant. Returns: K (array): the kernel matrix. """""" K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2) return K ","def rbfkernel(R, sigma): r""""""This function generates a radial basis function (RBF) kernel matrix. The elements of the RBF kernel are computed as: .. math:: K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)}, where :math:`\bf{r}_i` is the coordinates of point :math:`i` and :math:`\sigma` is a constant. **Example usage:** >>> R = array([[0, 1], [1, 0], [0, 0], [1, 1]]) >>> sigma = 1.0 >>> rbfkernel (R, 1.0) array([[1. , 0.36787944, 0.60653066, 0.60653066], [0.36787944, 1. , 0.60653066, 0.60653066], [0.60653066, 0.60653066, 1. , 0.36787944], [0.60653066, 0.60653066, 0.36787944, 1. ]]) Args: R (array): coordinates of the points. sigma (float): a constant. Returns: K (array): the kernel matrix. """""" K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2) return K " 35280,"def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init=""svd"", svd='numpy_svd', tol=1e-7, sparsity_coefficients=[], fixed_modes=[],hals='approx', verbose=False, return_errors=False): """""" Non-negative CP decomposition Uses HALS which updates each factor columnwise, fixing every other columns, see [1]_ Parameters ---------- tensor : ndarray rank : int number of components n_iter_max : int maximum number of iteration init : {'svd', 'random'}, optional svd : str, default is 'numpy_svd' function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS tol : float, optional tolerance: the algorithm stops when the variation in the reconstruction error is less than the tolerance Default: 1e-8 sparsity_coefficients: array of float (of length the number of modes) The sparsity coefficients on each factor. If set to None, the algorithm is computed without sparsity Default: [], fixed_modes: array of integers (between 0 and the number of modes) Has to be set not to update a factor, 0 and 1 for U and V respectively Default: [] verbose: boolean Indicates whether the algorithm prints the successive reconstruction errors or not Default: False return_errors: boolean Indicates whether the algorithm should return all reconstruction errors and computation time of each iteration or not Default: False Returns ------- factors : ndarray list list of positive factors of the CP decomposition element `i` is of shape ``(tensor.shape[i], rank)`` errors: list A list of reconstruction errors at each iteration of the algorithm. toc: list A list with accumulated time at each iterations fixed_modes = [], normalize = [False, False, False], verbose = True, return_errors = False) References ---------- [1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and Hierarchical ALS Algorithms for Nonnegative Matrix Factorization, Neural Computation 24 (4): 1085-1105, 2012. 
"""""" weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd, random_state=None, normalize_factors=False) norm_tensor = tl.norm(tensor, 2) nb_modes = len(tensor.shape) if sparsity_coefficients == None or len(sparsity_coefficients) != nb_modes: #print( # ""Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None."") sparsity_coefficients = [None for i in range(nb_modes)] if fixed_modes == None: fixed_modes = [] # Avoiding errors for fixed_value in fixed_modes: sparsity_coefficients[fixed_value] = None # Generating the mode update sequence modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes] # initialisation - declare local varaibles rec_errors = [] # Iteratation for iteration in range(n_iter_max): # One pass of least squares on each updated mode for mode in modes_list: # Computing Hadamard of cross-products pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor)) for i, factor in enumerate(factors): if i != mode: pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor) if not iteration and weights is not None: # Take into account init weights mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode) else: mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode) # Call the hals resolution with nnls, optimizing the current mode if hals=='approx': factors[mode] = tl.transpose( hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]), maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0]) elif hals=='exact': factors[mode] = tl.transpose( hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]), maxiter=5000)[0]) if tol: factors_norm = cp_norm((weights, factors)) iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights) rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor rec_errors.append(rec_error) if iteration > 1: if verbose: print('reconstruction error={}, variation={}.'.format( rec_errors[-1], rec_errors[-2] - rec_errors[-1])) if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol: if verbose: print('converged in {} iterations.'.format(iteration)) break cp_tensor = CPTensor((weights, factors)) if return_errors: return cp_tensor, rec_errors else: return cp_tensor ","def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init=""svd"", svd='numpy_svd', tol=1e-7, sparsity_coefficients=[], fixed_modes=[],hals='approx', verbose=False, return_errors=False): """""" Non-negative CP decomposition Uses Hierarchical ALS (Alternating Least Squares) which updates each factor column-wise (one column at a time while keeping all other columns fixed), see [1]_ Parameters ---------- tensor : ndarray rank : int number of components n_iter_max : int maximum number of iteration init : {'svd', 'random'}, optional svd : str, default is 'numpy_svd' function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS tol : float, optional tolerance: the algorithm stops when the variation in the reconstruction error is less than the tolerance Default: 1e-8 sparsity_coefficients: array of float (of length the number of modes) The sparsity coefficients on each factor. 
If set to None, the algorithm is computed without sparsity Default: [], fixed_modes: array of integers (between 0 and the number of modes) Has to be set not to update a factor, 0 and 1 for U and V respectively Default: [] verbose: boolean Indicates whether the algorithm prints the successive reconstruction errors or not Default: False return_errors: boolean Indicates whether the algorithm should return all reconstruction errors and computation time of each iteration or not Default: False Returns ------- factors : ndarray list list of positive factors of the CP decomposition element `i` is of shape ``(tensor.shape[i], rank)`` errors: list A list of reconstruction errors at each iteration of the algorithm. toc: list A list with accumulated time at each iterations fixed_modes = [], normalize = [False, False, False], verbose = True, return_errors = False) References ---------- [1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and Hierarchical ALS Algorithms for Nonnegative Matrix Factorization, Neural Computation 24 (4): 1085-1105, 2012. """""" weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd, random_state=None, normalize_factors=False) norm_tensor = tl.norm(tensor, 2) nb_modes = len(tensor.shape) if sparsity_coefficients == None or len(sparsity_coefficients) != nb_modes: #print( # ""Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None."") sparsity_coefficients = [None for i in range(nb_modes)] if fixed_modes == None: fixed_modes = [] # Avoiding errors for fixed_value in fixed_modes: sparsity_coefficients[fixed_value] = None # Generating the mode update sequence modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes] # initialisation - declare local varaibles rec_errors = [] # Iteratation for iteration in range(n_iter_max): # One pass of least squares on each updated mode for mode in modes_list: # Computing Hadamard of cross-products pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor)) for i, factor in enumerate(factors): if i != mode: pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor) if not iteration and weights is not None: # Take into account init weights mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode) else: mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode) # Call the hals resolution with nnls, optimizing the current mode if hals=='approx': factors[mode] = tl.transpose( hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]), maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0]) elif hals=='exact': factors[mode] = tl.transpose( hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]), maxiter=5000)[0]) if tol: factors_norm = cp_norm((weights, factors)) iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights) rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor rec_errors.append(rec_error) if iteration > 1: if verbose: print('reconstruction error={}, variation={}.'.format( rec_errors[-1], rec_errors[-2] - rec_errors[-1])) if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol: if verbose: print('converged in {} iterations.'.format(iteration)) break cp_tensor = CPTensor((weights, factors)) if return_errors: return cp_tensor, rec_errors else: return cp_tensor " 28596,"def plot_pair( data, group=""posterior"", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, 
figsize=None, textsize=None, kind: Union[str, List[str]] = ""scatter"", gridsize=""auto"", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """""" Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`az.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to ``ax.hexbin`` when using hexbin kind backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used. 
For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars=""regex"", ... coords=coords, ... divergences=True, ... textsize=18) """""" valid_kinds = [""scatter"", ""kde"", ""hexbin""] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f""Plot type {kind} not recognized."" ""Plot type must be in {valid_kinds}"")) if fill_last or contour: warnings.warn( ""fill_last and contour will be deprecated. Please use kde_kwargs"", UserWarning, ) if plot_kwargs: warnings.warn( ""plot_kwargs will be deprecated."" "" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs"", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == ""posterior"": divergent_group = ""sample_stats"" elif group == ""prior"": divergent_group = ""sample_stats_prior"" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), ""diverging""): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=(""diverging"",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( ""Divergences data not found, plotting without divergences. 
"" ""Make sure the sample method provides divergences data and "" ""that it is present in the `diverging` field of `sample_stats` "" ""or `sample_stats_prior` or set divergences=False"", UserWarning, ) if gridsize == ""auto"": gridsize = int(dataset.dims[""draw""] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_pair"", ""pairplot"", backend) ax = plot(**pairplot_kwargs) return ax ","def plot_pair( data, group=""posterior"", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, figsize=None, textsize=None, kind: Union[str, List[str]] = ""scatter"", gridsize=""auto"", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """""" Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`az.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`. marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. 
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to ``ax.hexbin`` when using hexbin kind backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars=""regex"", ... coords=coords, ... divergences=True, ... 
textsize=18) """""" valid_kinds = [""scatter"", ""kde"", ""hexbin""] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f""Plot type {kind} not recognized."" ""Plot type must be in {valid_kinds}"")) if fill_last or contour: warnings.warn( ""fill_last and contour will be deprecated. Please use kde_kwargs"", UserWarning, ) if plot_kwargs: warnings.warn( ""plot_kwargs will be deprecated."" "" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs"", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == ""posterior"": divergent_group = ""sample_stats"" elif group == ""prior"": divergent_group = ""sample_stats_prior"" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), ""diverging""): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=(""diverging"",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( ""Divergences data not found, plotting without divergences. "" ""Make sure the sample method provides divergences data and "" ""that it is present in the `diverging` field of `sample_stats` "" ""or `sample_stats_prior` or set divergences=False"", UserWarning, ) if gridsize == ""auto"": gridsize = int(dataset.dims[""draw""] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_pair"", ""pairplot"", backend) ax = plot(**pairplot_kwargs) return ax " 29872,"def run_combiner(sample_paths: List[str], out_file: str, tmp_path: str, intervals: Optional[List[hl.utils.Interval]] = None, header: Optional[str] = None, sample_names: Optional[List[str]] = None, branch_factor: int = CombinerConfig.default_branch_factor, batch_size: int = CombinerConfig.default_batch_size, target_records: int = CombinerConfig.default_target_records, import_interval_size: Optional[int] = None, use_genome_default_intervals: bool = False, 
use_exome_default_intervals: bool = False, overwrite: bool = False, reference_genome: str = 'default', contig_recoding: Optional[Dict[str, str]] = None, key_by_locus_and_alleles: bool = False): """"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table. **Partitioning** The partitioning of input GVCFs is determined the four parameters below, one of which must be passed to this function: - `intervals` -- User-supplied intervals. - `import_interval_size` -- Use intervals of this uniform size across the genome. - `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs. - `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs. It is recommended that new users include either `use_genome_default_intervals` or `use_exome_default_intervals`. Parameters ---------- sample_paths : :obj:`list` of :obj:`str` Paths to individual GVCFs. out_file : :obj:`str` Path to final combined matrix table. tmp_path : :obj:`str` Path for intermediate output. intervals : list of :class:`.Interval` or None Partitioning with which to import GVCFs in first phase of combiner. header : :obj:`str` or None External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well. sample_names: list of :obj:`str` or None Sample names, to be used with `header`. branch_factor : :obj:`int` Combiner branch factor. batch_size : :obj:`int` Combiner batch size. target_records : :obj:`int` Target records per partition in each combiner phase after the first. import_interval_size : :obj:`int` or None The target interval size to partition the reference into intervals for importing GVCFs. use_genome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. If `import_interval_size` is not None, this parameter is ignored. use_exome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. If `import_interval_size` is not None, this parameter is ignored. overwrite : :obj:`bool` Overwrite output file, if it exists. reference_genome : :obj:`str` Reference genome for GVCF import. contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional Mapping from contig name in gVCFs to contig name the reference genome. All contigs must be present in the `reference_genome`, so this is useful for mapping differently-formatted data onto known references. key_by_locus_and_alleles : :obj:`bool` Key by both locus and alleles in the final output. 
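A minimal usage sketch (assuming this function is exposed as ``hl.experimental.run_combiner``; the GVCF list, bucket paths and reference build below are placeholders, not values from this module):

>>> hl.experimental.run_combiner(gvcf_paths,
...                              out_file='gs://my-bucket/dataset.mt',
...                              tmp_path='gs://my-bucket/tmp/',
...                              use_genome_default_intervals=True,
...                              reference_genome='GRCh38')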
Returns ------- None """""" tmp_path += f'/combiner-temporary/{uuid.uuid4()}/' if header is not None: assert sample_names is not None assert len(sample_names) == len(sample_paths) n_partition_args = (int(intervals is not None) + int(import_interval_size is not None) + int(use_genome_default_intervals) + int(use_exome_default_intervals)) if n_partition_args == 0: raise ValueError(""'run_combiner': require one argument from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning"") if n_partition_args > 0: warning(""'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals'."" ""\n The argument found first in the list in this warning will be used, and others ignored."") if intervals is not None: info(f""Using {len(intervals)} user-supplied intervals as partitioning for GVCF import"") elif import_interval_size is not None: intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size) info(f""Using {len(intervals)} intervals with user-supplied size"" f"" {import_interval_size} as partitioning for GVCF import"") elif use_genome_default_intervals: size = CombinerConfig.default_genome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default whole-genome size"" f"" {import_interval_size} as partitioning for GVCF import"") elif use_exome_default_intervals: size = CombinerConfig.default_exome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default exome size"" f"" {import_interval_size} as partitioning for GVCF import"") assert intervals is not None config = CombinerConfig(branch_factor=branch_factor, batch_size=batch_size, target_records=target_records) plan = config.plan(len(sample_paths)) files_to_merge = sample_paths n_phases = len(plan.phases) total_ops = len(files_to_merge) * n_phases total_work_done = 0 for phase_i, phase in enumerate(plan.phases): phase_i += 1 # used for info messages, 1-indexed for readability n_jobs = len(phase.jobs) merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables' job_str = hl.utils.misc.plural('job', n_jobs) info(f""Starting phase {phase_i}/{n_phases}, merging {len(files_to_merge)} {merge_str} in {n_jobs} {job_str}."") if phase_i > 1: intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(), config.target_records, reference_genome=reference_genome) new_files_to_merge = [] for job_i, job in enumerate(phase.jobs): job_i += 1 # used for info messages, 1-indexed for readability n_merges = len(job.merges) merge_str = hl.utils.misc.plural('file', n_merges) pct_total = 100 * job.input_total_size / total_ops info( f""Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O."") merge_mts: List[MatrixTable] = [] for merge in job.merges: inputs = [files_to_merge[i] for i in merge.inputs] if phase_i == 1: mts = [transform_gvcf(vcf) for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False, _external_header=header, _external_sample_ids=[sample_names[i] for i in merge.inputs] if header is not None else None, reference_genome=reference_genome, contig_recoding=contig_recoding)] else: mts = [hl.read_matrix_table(path, _intervals=intervals) for path 
in inputs] merge_mts.append(combine_gvcfs(mts)) if phase_i == n_phases: # final merge! assert n_jobs == 1 assert len(merge_mts) == 1 [final_mt] = merge_mts if key_by_locus_and_alleles: final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True)) final_mt.write(out_file, overwrite=overwrite) new_files_to_merge = [out_file] info(f""Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished."") break tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/' hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True) pad = len(str(len(merge_mts))) new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts))) total_work_done += job.input_total_size info( f""Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished."") info(f""Finished phase {phase_i}/{n_phases}."") files_to_merge = new_files_to_merge assert files_to_merge == [out_file] info(""Finished!"") ","def run_combiner(sample_paths: List[str], out_file: str, tmp_path: str, intervals: Optional[List[hl.utils.Interval]] = None, header: Optional[str] = None, sample_names: Optional[List[str]] = None, branch_factor: int = CombinerConfig.default_branch_factor, batch_size: int = CombinerConfig.default_batch_size, target_records: int = CombinerConfig.default_target_records, import_interval_size: Optional[int] = None, use_genome_default_intervals: bool = False, use_exome_default_intervals: bool = False, overwrite: bool = False, reference_genome: str = 'default', contig_recoding: Optional[Dict[str, str]] = None, key_by_locus_and_alleles: bool = False): """"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table. **Partitioning** The partitioning of input GVCFs is determined the four parameters below, one of which must be passed to this function: - `intervals` -- User-supplied intervals. - `import_interval_size` -- Use intervals of this uniform size across the genome. - `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs. - `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs. It is recommended that new users include either `use_genome_default_intervals` or `use_exome_default_intervals`. Parameters ---------- sample_paths : :obj:`list` of :obj:`str` Paths to individual GVCFs. out_file : :obj:`str` Path to final combined matrix table. tmp_path : :obj:`str` Path for intermediate output. intervals : list of :class:`.Interval` or None Partitioning with which to import GVCFs in first phase of combiner. header : :obj:`str` or None External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well. sample_names: list of :obj:`str` or None Sample names, to be used with `header`. branch_factor : :obj:`int` Combiner branch factor. batch_size : :obj:`int` Combiner batch size. target_records : :obj:`int` Target records per partition in each combiner phase after the first. import_interval_size : :obj:`int` or None The target interval size to partition the reference into intervals for importing GVCFs. use_genome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. If `import_interval_size` is not None, this parameter is ignored. use_exome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. 
If `import_interval_size` is not None, this parameter is ignored. overwrite : :obj:`bool` Overwrite output file, if it exists. reference_genome : :obj:`str` Reference genome for GVCF import. contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional Mapping from contig name in gVCFs to contig name the reference genome. All contigs must be present in the `reference_genome`, so this is useful for mapping differently-formatted data onto known references. key_by_locus_and_alleles : :obj:`bool` Key by both locus and alleles in the final output. Returns ------- None """""" tmp_path += f'/combiner-temporary/{uuid.uuid4()}/' if header is not None: assert sample_names is not None assert len(sample_names) == len(sample_paths) n_partition_args = (int(intervals is not None) + int(import_interval_size is not None) + int(use_genome_default_intervals) + int(use_exome_default_intervals)) if n_partition_args == 0: raise ValueError(""'run_combiner': require one argument from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning"") elif n_partition_args != 1: warning(""'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals'."" ""\n The argument found first in the list in this warning will be used, and others ignored."") if intervals is not None: info(f""Using {len(intervals)} user-supplied intervals as partitioning for GVCF import"") elif import_interval_size is not None: intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size) info(f""Using {len(intervals)} intervals with user-supplied size"" f"" {import_interval_size} as partitioning for GVCF import"") elif use_genome_default_intervals: size = CombinerConfig.default_genome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default whole-genome size"" f"" {import_interval_size} as partitioning for GVCF import"") elif use_exome_default_intervals: size = CombinerConfig.default_exome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default exome size"" f"" {import_interval_size} as partitioning for GVCF import"") assert intervals is not None config = CombinerConfig(branch_factor=branch_factor, batch_size=batch_size, target_records=target_records) plan = config.plan(len(sample_paths)) files_to_merge = sample_paths n_phases = len(plan.phases) total_ops = len(files_to_merge) * n_phases total_work_done = 0 for phase_i, phase in enumerate(plan.phases): phase_i += 1 # used for info messages, 1-indexed for readability n_jobs = len(phase.jobs) merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables' job_str = hl.utils.misc.plural('job', n_jobs) info(f""Starting phase {phase_i}/{n_phases}, merging {len(files_to_merge)} {merge_str} in {n_jobs} {job_str}."") if phase_i > 1: intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(), config.target_records, reference_genome=reference_genome) new_files_to_merge = [] for job_i, job in enumerate(phase.jobs): job_i += 1 # used for info messages, 1-indexed for readability n_merges = len(job.merges) merge_str = hl.utils.misc.plural('file', n_merges) pct_total = 100 * job.input_total_size / total_ops info( f""Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} 
merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O."") merge_mts: List[MatrixTable] = [] for merge in job.merges: inputs = [files_to_merge[i] for i in merge.inputs] if phase_i == 1: mts = [transform_gvcf(vcf) for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False, _external_header=header, _external_sample_ids=[sample_names[i] for i in merge.inputs] if header is not None else None, reference_genome=reference_genome, contig_recoding=contig_recoding)] else: mts = [hl.read_matrix_table(path, _intervals=intervals) for path in inputs] merge_mts.append(combine_gvcfs(mts)) if phase_i == n_phases: # final merge! assert n_jobs == 1 assert len(merge_mts) == 1 [final_mt] = merge_mts if key_by_locus_and_alleles: final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True)) final_mt.write(out_file, overwrite=overwrite) new_files_to_merge = [out_file] info(f""Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished."") break tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/' hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True) pad = len(str(len(merge_mts))) new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts))) total_work_done += job.input_total_size info( f""Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished."") info(f""Finished phase {phase_i}/{n_phases}."") files_to_merge = new_files_to_merge assert files_to_merge == [out_file] info(""Finished!"") " 26096,"def create_colormap(palette): """"""Create colormap of the given numpy file, color vector, or colormap. Args: palette (dict): Information describing how to create a colormap object. See below for more details. **From a file** Colormaps can be loaded from ``.npy``, ``.npz``, or comma-separate text files. Numpy (npy/npz) files should be 2D arrays with rows for each color. Comma-separated files should have a row for each color with each column representing a single value/channel. The filename to load can be provided with the ``filename`` key in the provided palette information. A filename ending with ``.npy`` or ``.npz`` is read as a numpy file with :func:`numpy.load`. All other extensions are read as a comma-separated file. For ``.npz`` files the data must be stored as a positional list where the first element represents the colormap to use. See :func:`numpy.savez` for more information. The path to the colormap can be relative if it is stored in a directory specified by :ref:`config_path_setting`. Otherwise it should be an absolute path. The colormap is interpreted as 1 of 4 different ""colormap modes"": ``RGB``, ``RGBA``, ``VRGB``, or ``VRGBA``. The colormap mode can be forced with the ``colormap_mode`` key in the provided palette information. If it is not provided then a default will be chosen based on the number of columns in the array (3: RGB, 4: VRGB, 5: VRGBA). The ""V"" in the possible colormap modes represents the control value of where that color should be applied. If ""V"" is not provided in the colormap data it defaults to the row index in the colormap array (0, 1, 2, ...) divided by the total number of colors to produce a number between 0 and 1. See the ""Set Range"" section below for more information. The remaining elements in the colormap array represent the Red (R), Green (G), and Blue (B) color to be mapped to. See the ""Color Scale"" section below for more information on the value range of provided numbers. 
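As an illustration (the filename is made up for this sketch), a comma-separated RGBA colormap could be requested with::

    {'filename': 'my_colors.csv', 'colormap_mode': 'RGBA'}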
**From a list** Colormaps can be loaded from lists of colors provided by the ``colors`` key in the provided dictionary. Each element in the list represents a single color to be mapped to and can be 3 (RGB) or 4 (RGBA) elements long. By default the value or control point for a color is determined by the index in the list (0, 1, 2, ...) divided by the total number of colors to produce a number between 0 and 1. This can be overridden by providing a ``values`` key in the provided dictionary. See the ""Set Range"" section below for more information. See the ""Color Scale"" section below for more information on the value range of provided numbers. **From a builtin colormap** Colormaps can be loaded by name from the builtin colormaps in the ``trollimage``` package. Specify the name with the ``colors`` key in the provided dictionary (ex. ``{'colors': 'blues'}``). See :doc:`trollimage:colormap` for the full list of available colormaps. **Color Scale** By default colors are expected to be in a 0-255 range. This can be overridden by specifying ``color_scale`` in the provided colormap information. A common alternative to 255 is ``1`` to specify floating point numbers between 0 and 1. The resulting Colormap uses the normalized color values (0-1). **Set Range** By default the control points or values of the Colormap are between 0 and 1. This means that data values being mapped to a color must also be between 0 and 1. When this is not the case, the expected input range of the data can be used to configure the Colormap and change the control point values. To do this specify the input data range with ``min_value`` and ``max_value``. See :meth:`trollimage.colormap.Colormap.set_range` for more information. """""" fname = palette.get('filename', None) colors = palette.get('colors', None) # are colors between 0-255 or 0-1 color_scale = palette.get('color_scale', 255) if fname: cmap = _create_colormap_from_file(fname, palette, color_scale) elif isinstance(colors, (tuple, list)): cmap = _create_colormap_from_sequence(colors, palette, color_scale) elif isinstance(colors, str): import copy from trollimage import colormap cmap = copy.copy(getattr(colormap, colors)) else: raise ValueError(""Unknown colormap format: {}"".format(palette)) if palette.get(""reverse"", False): cmap.reverse() if 'min_value' in palette and 'max_value' in palette: cmap.set_range(palette[""min_value""], palette[""max_value""]) elif 'min_value' in palette or 'max_value' in palette: raise ValueError(""Both 'min_value' and 'max_value' must be specified"") return cmap ","def create_colormap(palette): """"""Create colormap of the given numpy file, color vector, or colormap. Args: palette (dict): Information describing how to create a colormap object. See below for more details. **From a file** Colormaps can be loaded from ``.npy``, ``.npz``, or comma-separated text files. Numpy (npy/npz) files should be 2D arrays with rows for each color. Comma-separated files should have a row for each color with each column representing a single value/channel. The filename to load can be provided with the ``filename`` key in the provided palette information. A filename ending with ``.npy`` or ``.npz`` is read as a numpy file with :func:`numpy.load`. All other extensions are read as a comma-separated file. For ``.npz`` files the data must be stored as a positional list where the first element represents the colormap to use. See :func:`numpy.savez` for more information. 
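For example (an illustrative sketch with a made-up filename), such a file could be written with ``numpy.savez('my_cmap.npz', color_array)``, where ``color_array`` is a 2D array with one row per color.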
The path to the colormap can be relative if it is stored in a directory specified by :ref:`config_path_setting`. Otherwise it should be an absolute path. The colormap is interpreted as 1 of 4 different ""colormap modes"": ``RGB``, ``RGBA``, ``VRGB``, or ``VRGBA``. The colormap mode can be forced with the ``colormap_mode`` key in the provided palette information. If it is not provided then a default will be chosen based on the number of columns in the array (3: RGB, 4: VRGB, 5: VRGBA). The ""V"" in the possible colormap modes represents the control value of where that color should be applied. If ""V"" is not provided in the colormap data it defaults to the row index in the colormap array (0, 1, 2, ...) divided by the total number of colors to produce a number between 0 and 1. See the ""Set Range"" section below for more information. The remaining elements in the colormap array represent the Red (R), Green (G), and Blue (B) color to be mapped to. See the ""Color Scale"" section below for more information on the value range of provided numbers. **From a list** Colormaps can be loaded from lists of colors provided by the ``colors`` key in the provided dictionary. Each element in the list represents a single color to be mapped to and can be 3 (RGB) or 4 (RGBA) elements long. By default the value or control point for a color is determined by the index in the list (0, 1, 2, ...) divided by the total number of colors to produce a number between 0 and 1. This can be overridden by providing a ``values`` key in the provided dictionary. See the ""Set Range"" section below for more information. See the ""Color Scale"" section below for more information on the value range of provided numbers. **From a builtin colormap** Colormaps can be loaded by name from the builtin colormaps in the ``trollimage``` package. Specify the name with the ``colors`` key in the provided dictionary (ex. ``{'colors': 'blues'}``). See :doc:`trollimage:colormap` for the full list of available colormaps. **Color Scale** By default colors are expected to be in a 0-255 range. This can be overridden by specifying ``color_scale`` in the provided colormap information. A common alternative to 255 is ``1`` to specify floating point numbers between 0 and 1. The resulting Colormap uses the normalized color values (0-1). **Set Range** By default the control points or values of the Colormap are between 0 and 1. This means that data values being mapped to a color must also be between 0 and 1. When this is not the case, the expected input range of the data can be used to configure the Colormap and change the control point values. To do this specify the input data range with ``min_value`` and ``max_value``. See :meth:`trollimage.colormap.Colormap.set_range` for more information. 
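As a combined illustration (all values below are arbitrary), a two-color grayscale ramp stretched over data values between 0 and 100 could be described as::

    {'colors': [[0, 0, 0], [255, 255, 255]], 'min_value': 0, 'max_value': 100}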
"""""" fname = palette.get('filename', None) colors = palette.get('colors', None) # are colors between 0-255 or 0-1 color_scale = palette.get('color_scale', 255) if fname: cmap = _create_colormap_from_file(fname, palette, color_scale) elif isinstance(colors, (tuple, list)): cmap = _create_colormap_from_sequence(colors, palette, color_scale) elif isinstance(colors, str): import copy from trollimage import colormap cmap = copy.copy(getattr(colormap, colors)) else: raise ValueError(""Unknown colormap format: {}"".format(palette)) if palette.get(""reverse"", False): cmap.reverse() if 'min_value' in palette and 'max_value' in palette: cmap.set_range(palette[""min_value""], palette[""max_value""]) elif 'min_value' in palette or 'max_value' in palette: raise ValueError(""Both 'min_value' and 'max_value' must be specified"") return cmap " 31958,"def tableToMarkdown(name, t, headers=None, headerTransform=None, removeNull=False, metadata=None, url_keys=None, date_fields=None, json_transform=None, is_auto_json_transform=False): """""" Converts a demisto table in JSON form to a Markdown table :type name: ``str`` :param name: The name of the table (required) :type t: ``dict`` or ``list`` :param t: The JSON table - List of dictionaries with the same keys or a single dictionary (required) :type headers: ``list`` or ``string`` :param headers: A list of headers to be presented in the output table (by order). If string will be passed then table will have single header. Default will include all available headers. :type headerTransform: ``function`` :param headerTransform: A function that formats the original data headers (optional) :type removeNull: ``bool`` :param removeNull: Remove empty columns from the table. Default is False :type metadata: ``str`` :param metadata: Metadata about the table contents :type url_keys: ``list`` :param url_keys: a list of keys in the given JSON table that should be turned in to clickable :type date_fields: ``list`` :param date_fields: A list of date fields to format the value to human-readable output. :type json_transform: ``TableJsonTransformer`` :param json_transform: An instance of JsonTransformer. If not passed, default one will be initiated :type is_auto_json_transform: ``bool`` :param is_auto_json_transform: Boolean to try to auto transform complex json :return: A string representation of the markdown table :rtype: ``str`` """""" # Turning the urls in the table to clickable if url_keys: t = url_to_clickable_markdown(t, url_keys) mdResult = '' if name: mdResult = '### ' + name + '\n' if metadata: mdResult += metadata + '\n' if not t or len(t) == 0: mdResult += '**No entries.**\n' return mdResult if not headers and isinstance(t, dict) and len(t.keys()) == 1: # in case of a single key, create a column table where each element is in a different row. headers = list(t.keys()) t = list(t.values())[0] if not isinstance(t, list): t = [t] if headers and isinstance(headers, STRING_TYPES): headers = [headers] if not isinstance(t[0], dict): # the table contains only simple objects (strings, numbers) # should be only one header if headers and len(headers) > 0: header = headers[0] t = [{header: item} for item in t] else: raise Exception(""Missing headers param for tableToMarkdown. 
Example: headers=['Some Header']"") # in case of headers was not provided (backward compatibility) if not headers: headers = list(t[0].keys()) headers.sort() if removeNull: headers_aux = headers[:] for header in headers: if all(obj.get(header) in ('', None, [], {}) for obj in t): headers_aux.remove(header) headers = headers_aux if not json_transform: json_transform = {header: JsonTransformer(flatten=False if is_auto_json_transform else True) for header in headers} if t and len(headers) > 0: newHeaders = [] if headerTransform is None: # noqa def headerTransform(s): return stringEscapeMD(s, True, True) # noqa for header in headers: newHeaders.append(headerTransform(header)) mdResult += '|' if len(newHeaders) == 1: mdResult += newHeaders[0] else: mdResult += '|'.join(newHeaders) mdResult += '|\n' sep = '---' mdResult += '|' + '|'.join([sep] * len(headers)) + '|\n' for entry in t: entry_copy = entry.copy() if date_fields: for field in date_fields: try: entry_copy[field] = datetime.fromtimestamp(int(entry_copy[field]) / 1000).strftime('%Y-%m-%d %H:%M:%S') except Exception: pass vals = [stringEscapeMD((formatCell(entry_copy.get(h, ''), False, json_transform.get(h)) if entry_copy.get(h) is not None else ''), True, True) for h in headers] # this pipe is optional mdResult += '| ' try: mdResult += ' | '.join(vals) except UnicodeDecodeError: vals = [str(v) for v in vals] mdResult += ' | '.join(vals) mdResult += ' |\n' else: mdResult += '**No entries.**\n' return mdResult ","def tableToMarkdown(name, t, headers=None, headerTransform=None, removeNull=False, metadata=None, url_keys=None, date_fields=None, json_transform=None, is_auto_json_transform=False): """""" Converts a demisto table in JSON form to a Markdown table :type name: ``str`` :param name: The name of the table (required) :type t: ``dict`` or ``list`` :param t: The JSON table - List of dictionaries with the same keys or a single dictionary (required) :type headers: ``list`` or ``string`` :param headers: A list of headers to be presented in the output table (by order). If string will be passed then table will have single header. Default will include all available headers. :type headerTransform: ``function`` :param headerTransform: A function that formats the original data headers (optional) :type removeNull: ``bool`` :param removeNull: Remove empty columns from the table. Default is False :type metadata: ``str`` :param metadata: Metadata about the table contents :type url_keys: ``list`` :param url_keys: a list of keys in the given JSON table that should be turned in to clickable :type date_fields: ``list`` :param date_fields: A list of date fields to format the value to human-readable output. :type json_transform: ``TableJsonTransformer`` :param json_transform: An instance of JsonTransformer. If not passed, default one will be initiated :type is_auto_json_transform: ``bool`` :param is_auto_json_transform: Boolean to try to auto transform complex json :return: A string representation of the markdown table :rtype: ``str`` """""" # Turning the urls in the table to clickable if url_keys: t = url_to_clickable_markdown(t, url_keys) mdResult = '' if name: mdResult = '### ' + name + '\n' if metadata: mdResult += metadata + '\n' if not t or len(t) == 0: mdResult += '**No entries.**\n' return mdResult if not headers and isinstance(t, dict) and len(t.keys()) == 1: # in case of a single key, create a column table where each element is in a different row. 
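# Illustrative example (not part of the original logic): for t = {'ip': ['1.1.1.1', '2.2.2.2']}
# this branch yields headers == ['ip'] and t == ['1.1.1.1', '2.2.2.2'], i.e. one table row per element.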
headers = list(t.keys()) t = list(t.values())[0] if not isinstance(t, list): t = [t] if headers and isinstance(headers, STRING_TYPES): headers = [headers] if not isinstance(t[0], dict): # the table contains only simple objects (strings, numbers) # should be only one header if headers and len(headers) > 0: header = headers[0] t = [{header: item} for item in t] else: raise Exception(""Missing headers param for tableToMarkdown. Example: headers=['Some Header']"") # in case of headers was not provided (backward compatibility) if not headers: headers = list(t[0].keys()) headers.sort() if removeNull: headers_aux = headers[:] for header in headers: if all(obj.get(header) in ('', None, [], {}) for obj in t): headers_aux.remove(header) headers = headers_aux if not json_transform: json_transform = {header: JsonTransformer(flatten=not is_auto_json_transform) for header in headers} if t and len(headers) > 0: newHeaders = [] if headerTransform is None: # noqa def headerTransform(s): return stringEscapeMD(s, True, True) # noqa for header in headers: newHeaders.append(headerTransform(header)) mdResult += '|' if len(newHeaders) == 1: mdResult += newHeaders[0] else: mdResult += '|'.join(newHeaders) mdResult += '|\n' sep = '---' mdResult += '|' + '|'.join([sep] * len(headers)) + '|\n' for entry in t: entry_copy = entry.copy() if date_fields: for field in date_fields: try: entry_copy[field] = datetime.fromtimestamp(int(entry_copy[field]) / 1000).strftime('%Y-%m-%d %H:%M:%S') except Exception: pass vals = [stringEscapeMD((formatCell(entry_copy.get(h, ''), False, json_transform.get(h)) if entry_copy.get(h) is not None else ''), True, True) for h in headers] # this pipe is optional mdResult += '| ' try: mdResult += ' | '.join(vals) except UnicodeDecodeError: vals = [str(v) for v in vals] mdResult += ' | '.join(vals) mdResult += ' |\n' else: mdResult += '**No entries.**\n' return mdResult " 28189,"def test_compliance(smu_sampling_measurement, smu_output): n_samples = smu_output[0] smu_sampling_measurement, status, _, _ = smu_sampling_measurement smu_sampling_measurement.timing_parameters(h_bias=0, interval=0.1, number=n_samples) smu_sampling_measurement.sampling_measurement.get() compliance_list_string = [status]*n_samples compliance_list = [constants.ComplianceErrorList[i[0]].value for i in compliance_list_string] assert isinstance( smu_sampling_measurement.sampling_measurement.compliance(), list) np.testing.assert_array_equal( smu_sampling_measurement.sampling_measurement.compliance(), compliance_list ) ","def test_compliance(smu_sampling_measurement, smu_output): n_samples, _ = smu_output smu_sampling_measurement, status, _, _ = smu_sampling_measurement smu_sampling_measurement.timing_parameters(h_bias=0, interval=0.1, number=n_samples) smu_sampling_measurement.sampling_measurement.get() compliance_list_string = [status]*n_samples compliance_list = [constants.ComplianceErrorList[i[0]].value for i in compliance_list_string] assert isinstance( smu_sampling_measurement.sampling_measurement.compliance(), list) np.testing.assert_array_equal( smu_sampling_measurement.sampling_measurement.compliance(), compliance_list ) " 14712,"def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the available Netatmo weather sensors."""""" dev = [] if CONF_MODULES in config: manual_config(hass, config, dev) else: auto_config(hass, config, dev) if dev: add_entities(dev, True) ","def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the available Netatmo weather sensors."""""" 
dev = [] if CONF_MODULES in config: manual_config(auth, config, dev) else: auto_config(hass, config, dev) if dev: add_entities(dev, True) " 31155,"def transform_object(object_type: str, object=None): """""" Transform objects, i.e. - replace the scim uri to a compressed object name. This is done as PAN XSOAR is unable to process json keys with symbols like - '.' or ':'. :type object_type: ``str`` :param object_type: Type of IdentityIQ object. :type object: ``JSON`` :param object: Identity resources object. :return: Transformed object. """""" if object is None: return None if object_type == 'IdentityIQ.Identity': if 'urn:ietf:params:scim:schemas:sailpoint:1.0:User' in object: object['sailpointUser'] = object.pop('urn:ietf:params:scim:schemas:sailpoint:1.0:User') if 'urn:ietf:params:scim:schemas:extension:enterprise:2.0:User' in object: object['extendedUser'] = object.pop('urn:ietf:params:scim:schemas:extension:enterprise:2.0:User') elif object_type == 'IdentityIQ.Workflow': if 'urn:ietf:params:scim:schemas:sailpoint:1.0:LaunchedWorkflow' in object: object['launchedWorkflow'] = object.pop('urn:ietf:params:scim:schemas:sailpoint:1.0:LaunchedWorkflow') elif object_type == 'IdentityIQ.Alert': if 'urn:ietf:params:scim:schemas:sailpoint:1.0:AlertInput' in object: object['alertInput'] = object.pop('urn:ietf:params:scim:schemas:sailpoint:1.0:AlertInput') return object ","def transform_object(object_type: str, object=None): """""" Transform objects, i.e. - replace the scim uri to a compressed object name. This is done as PAN XSOAR is unable to process json keys with symbols like - '.' or ':'. :type object_type: ``str`` :param object_type: Type of IdentityIQ object. :type object: ``JSON`` :param object: Identity resources object. :return: Transformed object. """""" if not isinstance(object, dict): return None if object_type == 'IdentityIQ.Identity': if 'urn:ietf:params:scim:schemas:sailpoint:1.0:User' in object: object['sailpointUser'] = object.pop('urn:ietf:params:scim:schemas:sailpoint:1.0:User') if 'urn:ietf:params:scim:schemas:extension:enterprise:2.0:User' in object: object['extendedUser'] = object.pop('urn:ietf:params:scim:schemas:extension:enterprise:2.0:User') elif object_type == 'IdentityIQ.Workflow': if 'urn:ietf:params:scim:schemas:sailpoint:1.0:LaunchedWorkflow' in object: object['launchedWorkflow'] = object.pop('urn:ietf:params:scim:schemas:sailpoint:1.0:LaunchedWorkflow') elif object_type == 'IdentityIQ.Alert': if 'urn:ietf:params:scim:schemas:sailpoint:1.0:AlertInput' in object: object['alertInput'] = object.pop('urn:ietf:params:scim:schemas:sailpoint:1.0:AlertInput') return object " 58293,"def get_rlimit_str(limit: int) -> str: limit = resource.getrlimit(limit) soft = ""infinity"" if limit[0] == resource.RLIM_INFINITY else str(limit[0]) hard = ""infinity"" if limit[1] == resource.RLIM_INFINITY else str(limit[1]) return f""{soft}:{hard}"" ","def get_rlimit_str(rlimit: int) -> str: limit = resource.getrlimit(rlimit) soft = ""infinity"" if limit[0] == resource.RLIM_INFINITY else str(limit[0]) hard = ""infinity"" if limit[1] == resource.RLIM_INFINITY else str(limit[1]) return f""{soft}:{hard}"" " 30719,"def upload_files(excluded_files, dir_path, file_path): """""" :param excluded_files: excluded files :param dir_path: dir path for the files :param file_path: the path to the pcap file :return: """""" filenames = [] # type: ignore # recursive call over the file system top down for root, directories, files in os.walk(dir_path): for f in files: # skipping previously existing files # adding it 
to the extracted pcap files list if f not in excluded_files and isfile(os.path.join(root, f)): filenames.append(os.path.join(root, f)) if len(filenames) == 0: return_error('Could not find files') else: results = [] context = [] protocol, packet_data = find_files_protocol(file_path) md5 = hashlib.md5() sha1 = hashlib.sha1() sha256 = hashlib.sha256() files_base_names = [os.path.basename(file_path) for file_path in filenames] # noqa[F812] files_dic = {file_path: os.path.basename(file_path) for file_path in filenames} for file_path, file_name in files_dic.items(): for data in packet_data: packet_number = data.split()[0] for packet_number in packet_data: data = [i for i in packet_number.split()] source_ip = data[2] dest_ip = data[4] with open(file_path, 'rb') as _file: demisto.results(fileResult(file_name, _file.read())) with open(file_path, 'rb') as _file: data = _file.read() md5.update(data) sha1.update(data) sha256.update(data) context.append({ 'FileMD5': md5.hexdigest(), 'FileSHA1': sha1.hexdigest(), 'FileSHA256': sha256.hexdigest(), 'FileName': file_name, 'FileSize': os.path.getsize(file_path), 'DetectedInProtocol': protocol, 'FileExtension': os.path.splitext(file_name)[1], 'SourceIP': source_ip, 'DestinationIP': dest_ip }) ec = { 'PcapExtractedFiles(val.FileMD5 === obj.FileMD5)': context } results.append( { 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': {'extractedFiles': files_base_names}, 'EntryContext': ec, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Pcap Extracted Files', [{'name': file_name} for file_name in files_base_names]) }) demisto.results(results) ","def upload_files(excluded_files, dir_path, file_path): """""" :param excluded_files: excluded files :param dir_path: dir path for the files :param file_path: the path to the pcap file :return: """""" filenames = [] # type: ignore # recursive call over the file system top down for root, directories, files in os.walk(dir_path): for f in files: # skipping previously existing files # adding it to the extracted pcap files list if f not in excluded_files and isfile(os.path.join(root, f)): filenames.append(os.path.join(root, f)) if len(filenames) == 0: return_error('Could not find files') else: results = [] context = [] protocol, packet_data = find_files_protocol(file_path) md5 = hashlib.md5() sha1 = hashlib.sha1() sha256 = hashlib.sha256() files_base_names = [os.path.basename(file_path) for file_path in filenames] # noqa[F812] files_dic = {file_path: os.path.basename(file_path) for file_path in filenames} for file_path, file_name in files_dic.items(): for data in packet_data: packet_number = data.split()[0] for packet_number in packet_data: data = [i for i in packet_number.split()] source_ip = data[2] dest_ip = data[4] with open(file_path, 'rb') as _file: demisto.results(fileResult(file_name, _file.read())) with open(file_path, 'rb') as _file: data = _file.read() md5.update(data) sha1.update(data) sha256.update(data) context.append({ 'FileMD5': md5.hexdigest(), 'FileSHA1': sha1.hexdigest(), 'FileSHA256': sha256.hexdigest(), 'FileName': file_name, 'FileSize': os.path.getsize(file_path), 'DetectedInProtocol': protocol, 'FileExtension': os.path.splitext(file_name)[1], 'SourceIP': source_ip, 'DestinationIP': dest_ip }) ec = { 'PcapExtractedFiles(val.FileMD5 === obj.FileMD5)': context } results.append( { 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': ec, 'EntryContext': ec, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': 
tableToMarkdown('Pcap Extracted Files', [{'name': file_name} for file_name in files_base_names]) }) demisto.results(results) " 37117,"def gaussian(duration: int, amp: complex, sigma: float, name: Optional[str] = None, zero_ends: Optional[bool] = True) -> SamplePulse: r""""""Generates unnormalized gaussian `SamplePulse`. Centered at `duration/2` and zeroed at `t=0` and `t=duration` to prevent large initial/final discontinuities. Applies `midpoint` sampling strategy to generate discrete pulse from continuous function. Integrated area under curve is $\Omega_g(amp, sigma) = amp \times np.sqrt(2\pi \sigma^2)$ Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `duration/2`. sigma: Width (standard deviation) of pulse. name: Name of pulse. zero_ends: If True, make the first and last sample zero, but rescale to preserve amp. """""" center = duration/2 zeroed_width = duration if zero_ends else None rescale_amp = True if zero_ends else False return _sampled_gaussian_pulse(duration, amp, center, sigma, zeroed_width=zeroed_width, rescale_amp=rescale_amp, name=name) ","def gaussian(duration: int, amp: complex, sigma: float, name: Optional[str] = None, zero_ends: bool = True) -> SamplePulse: r""""""Generates unnormalized gaussian `SamplePulse`. Centered at `duration/2` and zeroed at `t=0` and `t=duration` to prevent large initial/final discontinuities. Applies `midpoint` sampling strategy to generate discrete pulse from continuous function. Integrated area under curve is $\Omega_g(amp, sigma) = amp \times np.sqrt(2\pi \sigma^2)$ Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `duration/2`. sigma: Width (standard deviation) of pulse. name: Name of pulse. zero_ends: If True, make the first and last sample zero, but rescale to preserve amp. """""" center = duration/2 zeroed_width = duration if zero_ends else None rescale_amp = True if zero_ends else False return _sampled_gaussian_pulse(duration, amp, center, sigma, zeroed_width=zeroed_width, rescale_amp=rescale_amp, name=name) " 2374,"def check_regressors_predict_single_target(name, regressor_orig): # check the consistency of the prediction obtained from regressors with # single target. X, y = _regression_dataset() X = _pairwise_estimator_convert_X(X, regressor_orig) y = scale(y) # X is already scaled regressor = clone(regressor_orig) y = _enforce_estimator_tags_y(regressor, y) if not hasattr(regressor, ""alphas"") and hasattr(regressor, ""alpha""): # linear regressors need to set alpha, but not generalized CV ones regressor.alpha = 0.01 if name == ""PassiveAggressiveRegressor"": regressor.C = 0.01 set_random_state(regressor) regressor.fit(X, y) y_pred = regressor.predict(X) assert y_pred.shape == (y.shape[0],), ( f""The shape of the prediction of {name} is not consistent with "" f""y.shape={y.shape}. We expect a shape of {y.shape} but predictions have a "" f"" shape of {y_pred.shape}."" ) y = y[:, np.newaxis] regressor.fit(X, y) y_pred = regressor.predict(X) assert y_pred.shape == (y.shape[0],), ( f""The shape of the prediction of {name} is not consistent with "" f""y.shape={y.shape}. We expect a shape of {(y.shape[0],)} but predictions have "" f""a shape of {y_pred.shape}."" ) ","def check_regressors_predict_single_target(name, regressor_orig): # check the consistency of the prediction obtained from regressors with # single target. 
X, y = _regression_dataset() X = _pairwise_estimator_convert_X(X, regressor_orig) y = scale(y) # X is already scaled regressor = clone(regressor_orig) y = _enforce_estimator_tags_y(regressor, y) if not hasattr(regressor, ""alphas"") and hasattr(regressor, ""alpha""): # linear regressors need to set alpha, but not generalized CV ones regressor.alpha = 0.01 if name == ""PassiveAggressiveRegressor"": regressor.C = 0.01 set_random_state(regressor) regressor.fit(X, y) y_pred = regressor.predict(X) assert y_pred.shape == y.shape, ( f""The shape of the prediction of {name} is not consistent with "" f""y.shape={y.shape}. We expect a shape of {y.shape} but predictions have a "" f"" shape of {y_pred.shape}."" ) y = y[:, np.newaxis] regressor.fit(X, y) y_pred = regressor.predict(X) assert y_pred.shape == (y.shape[0],), ( f""The shape of the prediction of {name} is not consistent with "" f""y.shape={y.shape}. We expect a shape of {(y.shape[0],)} but predictions have "" f""a shape of {y_pred.shape}."" ) " 12386,"def mount_cb(device, callback, data=None, mtype=None, update_env_for_mount=None): """""" Mount the device, call method 'callback' passing the directory in which it was mounted, then unmount. Return whatever 'callback' returned. If data != None, also pass data to callback. mtype is a filesystem type. it may be a list, string (a single fsname) or a list of fsnames. """""" if isinstance(mtype, str): mtypes = [mtype] elif isinstance(mtype, (list, tuple)): mtypes = list(mtype) elif mtype is None: mtypes = None else: raise TypeError( 'Unsupported type provided for mtype parameter: {_type}'.format( _type=type(mtype))) # clean up 'mtype' input a bit based on platform. platsys = platform.system().lower() if platsys == ""linux"": if mtypes is None: mtypes = [""auto""] elif platsys.endswith(""bsd""): if mtypes is None: mtypes = ['ufs', 'cd9660', 'msdos'] for index, mtype in enumerate(mtypes): if mtype == ""iso9660"": mtypes[index] = ""cd9660"" if mtype in [""vfat"", ""msdosfs"", ""msdos""]: mtypes[index] = ""msdos"" else: # we cannot do a smart ""auto"", so just call 'mount' once with no -t mtypes = [''] mounted = mounts() with temp_utils.tempdir() as tmpd: umount = False if os.path.realpath(device) in mounted: mountpoint = mounted[os.path.realpath(device)]['mountpoint'] else: failure_reason = None for mtype in mtypes: mountpoint = None try: mountcmd = ['mount', '-o', 'ro'] if mtype: mountcmd.extend(['-t', mtype]) mountcmd.append(device) mountcmd.append(tmpd) subp.subp(mountcmd, update_env=update_env_for_mount) umount = tmpd # This forces it to be unmounted (when set) mountpoint = tmpd break except (IOError, OSError) as exc: LOG.debug(""Failed mount of '%s' as '%s': %s"", device, mtype, exc) failure_reason = exc if not mountpoint: raise MountFailedError(""Failed mounting %s to %s due to: %s"" % (device, tmpd, failure_reason)) # Be nice and ensure it ends with a slash if not mountpoint.endswith(""/""): mountpoint += ""/"" with unmounter(umount): if data is None: ret = callback(mountpoint) else: ret = callback(mountpoint, data) return ret ","def mount_cb(device, callback, data=None, mtype=None, update_env_for_mount=None): """""" Mount the device, call method 'callback' passing the directory in which it was mounted, then unmount. Return whatever 'callback' returned. If data != None, also pass data to callback. mtype is a filesystem type. it may be a list, string (a single fsname) or a list of fsnames. 
"""""" if isinstance(mtype, str): mtypes = [mtype] elif isinstance(mtype, (list, tuple)): mtypes = list(mtype) elif mtype is None: mtypes = None else: raise TypeError( 'Unsupported type provided for mtype parameter: {_type}'.format( _type=type(mtype))) # clean up 'mtype' input a bit based on platform. platsys = platform.system().lower() if platsys == ""linux"": if mtypes is None: mtypes = [""auto""] elif platsys.endswith(""bsd""): if mtypes is None: mtypes = ['ufs', 'cd9660', 'msdos'] for index, mtype in enumerate(mtypes): if mtype == ""iso9660"": mtypes[index] = ""cd9660"" if mtype in [""vfat"", ""msdosfs""]: mtypes[index] = ""msdos"" else: # we cannot do a smart ""auto"", so just call 'mount' once with no -t mtypes = [''] mounted = mounts() with temp_utils.tempdir() as tmpd: umount = False if os.path.realpath(device) in mounted: mountpoint = mounted[os.path.realpath(device)]['mountpoint'] else: failure_reason = None for mtype in mtypes: mountpoint = None try: mountcmd = ['mount', '-o', 'ro'] if mtype: mountcmd.extend(['-t', mtype]) mountcmd.append(device) mountcmd.append(tmpd) subp.subp(mountcmd, update_env=update_env_for_mount) umount = tmpd # This forces it to be unmounted (when set) mountpoint = tmpd break except (IOError, OSError) as exc: LOG.debug(""Failed mount of '%s' as '%s': %s"", device, mtype, exc) failure_reason = exc if not mountpoint: raise MountFailedError(""Failed mounting %s to %s due to: %s"" % (device, tmpd, failure_reason)) # Be nice and ensure it ends with a slash if not mountpoint.endswith(""/""): mountpoint += ""/"" with unmounter(umount): if data is None: ret = callback(mountpoint) else: ret = callback(mountpoint, data) return ret " 56045,"def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() # set up weights and biases if available if is_wandb_available() and args.wandb: import wandb wandb.init(project=args.output_dir.split(""/"")[-1]) else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. 
# TODO support datasets from local folders dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir) # Rename column names to standardized names (only ""image"" and ""label"" need to be present) if ""pixel_values"" in dataset[""train""].column_names: dataset = dataset.rename_columns({""pixel_values"": ""image""}) if ""annotation"" in dataset[""train""].column_names: dataset = dataset.rename_columns({""annotation"": ""label""}) # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if ""validation"" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset[""train""].train_test_split(args.train_val_split) dataset[""train""] = split[""train""] dataset[""validation""] = split[""test""] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if args.dataset_name == ""scene_parse_150"": repo_id = ""datasets/huggingface/label-files"" filename = ""ade20k-id2label.json"" num_labels = 150 else: repo_id = f""datasets/{args.dataset_name}"" filename = ""id2label.json"" id2label = json.load(open(hf_hub_download(repo_id, filename), ""r"")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) # Load pretrained model and feature extractor config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForSemanticSegmentation.from_pretrained( args.model_name_or_path, config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image + target. # Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py _train_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), RandomCrop(size=feature_extractor.size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) # Define torchvision transform to be applied to each image. 
# jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) _val_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), Resize(size=(feature_extractor.size, feature_extractor.size)), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) def train_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch[""image""], example_batch[""label""]): image, target = _train_transforms(image.convert(""RGB""), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding[""pixel_values""] = torch.stack(pixel_values) encoding[""labels""] = torch.stack(labels) return encoding def val_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch[""image""], example_batch[""label""]): image, target = _val_transforms(image.convert(""RGB""), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding[""pixel_values""] = torch.stack(pixel_values) encoding[""labels""] = torch.stack(labels) return encoding with accelerator.main_process_first(): train_dataset = dataset[""train""].with_transform(train_transforms) eval_dataset = dataset[""validation""].with_transform(val_transforms) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer optimizer = torch.optim.AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Instantiate metric metric = load_metric(""mean_iou"") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break # Log all results if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0: loss.detach() if accelerator.state.num_processes > 1: loss = accelerator.gather(loss).sum() / accelerator.num_processes train_logs = { ""loss"": loss, ""lr"": torch.tensor(optimizer.param_groups[0][""lr""]), } # Evaluate (gather required) with torch.no_grad(): upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch[""labels""].shape[-2:], mode=""bilinear"", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch[""labels""]), ) train_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) train_logs[""mean_iou""] = train_metrics[""mean_iou""] train_logs[""mean_accuracy""] = train_metrics[""mean_accuracy""] train_logs[""overall_accuracy""] = train_metrics[""overall_accuracy""] log_str = """" for k, v in train_logs.items(): if isinstance(v, torch.Tensor): log_str += ""| {}: {:.3e}"".format(k, v.item()) else: log_str += ""| {}: {:.3e}"".format(k, v) if accelerator.is_local_main_process: progress_bar.write(log_str) if is_wandb_available() and args.wandb: wandb.log(train_logs) # Save model every `args.saving_steps` steps if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0: if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process: repo.push_to_hub( commit_message=f""Training in progress step {completed_steps}"", blocking=False, auto_lfs_prune=True, ) logger.info(""***** Running evaluation *****"") model.eval() for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)): outputs = model(**batch) upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch[""labels""].shape[-2:], mode=""bilinear"", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch[""labels""]), ) eval_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) logger.info(f""epoch {epoch}: {eval_metrics}"") if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f""Training in progress epoch {epoch}"", blocking=False, 
auto_lfs_prune=True ) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message=""End of training"", auto_lfs_prune=True) ","def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() if args.wandb: accelerator.init_trackers(args.output_dir.split(""/"")[-1]) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir) # Rename column names to standardized names (only ""image"" and ""label"" need to be present) if ""pixel_values"" in dataset[""train""].column_names: dataset = dataset.rename_columns({""pixel_values"": ""image""}) if ""annotation"" in dataset[""train""].column_names: dataset = dataset.rename_columns({""annotation"": ""label""}) # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if ""validation"" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset[""train""].train_test_split(args.train_val_split) dataset[""train""] = split[""train""] dataset[""validation""] = split[""test""] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. 
if args.dataset_name == ""scene_parse_150"": repo_id = ""datasets/huggingface/label-files"" filename = ""ade20k-id2label.json"" num_labels = 150 else: repo_id = f""datasets/{args.dataset_name}"" filename = ""id2label.json"" id2label = json.load(open(hf_hub_download(repo_id, filename), ""r"")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) # Load pretrained model and feature extractor config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForSemanticSegmentation.from_pretrained( args.model_name_or_path, config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image + target. # Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py _train_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), RandomCrop(size=feature_extractor.size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) # Define torchvision transform to be applied to each image. # jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) _val_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), Resize(size=(feature_extractor.size, feature_extractor.size)), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) def train_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch[""image""], example_batch[""label""]): image, target = _train_transforms(image.convert(""RGB""), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding[""pixel_values""] = torch.stack(pixel_values) encoding[""labels""] = torch.stack(labels) return encoding def val_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch[""image""], example_batch[""label""]): image, target = _val_transforms(image.convert(""RGB""), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding[""pixel_values""] = torch.stack(pixel_values) encoding[""labels""] = torch.stack(labels) return encoding with accelerator.main_process_first(): train_dataset = dataset[""train""].with_transform(train_transforms) eval_dataset = dataset[""validation""].with_transform(val_transforms) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer optimizer = torch.optim.AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Scheduler and math around the number of training steps. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Instantiate metric metric = load_metric(""mean_iou"") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break # Log all results if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0: loss.detach() if accelerator.state.num_processes > 1: loss = accelerator.gather(loss).sum() / accelerator.num_processes train_logs = { ""loss"": loss, ""lr"": torch.tensor(optimizer.param_groups[0][""lr""]), } # Evaluate (gather required) with torch.no_grad(): upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch[""labels""].shape[-2:], mode=""bilinear"", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch[""labels""]), ) train_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) train_logs[""mean_iou""] = train_metrics[""mean_iou""] train_logs[""mean_accuracy""] = train_metrics[""mean_accuracy""] train_logs[""overall_accuracy""] = train_metrics[""overall_accuracy""] log_str = """" for k, v in train_logs.items(): if isinstance(v, torch.Tensor): log_str += ""| {}: {:.3e}"".format(k, v.item()) else: log_str += ""| {}: {:.3e}"".format(k, v) if accelerator.is_local_main_process: progress_bar.write(log_str) if is_wandb_available() and args.wandb: wandb.log(train_logs) # Save model every `args.saving_steps` steps if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0: if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process: 
repo.push_to_hub( commit_message=f""Training in progress step {completed_steps}"", blocking=False, auto_lfs_prune=True, ) logger.info(""***** Running evaluation *****"") model.eval() for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)): outputs = model(**batch) upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch[""labels""].shape[-2:], mode=""bilinear"", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch[""labels""]), ) eval_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) logger.info(f""epoch {epoch}: {eval_metrics}"") if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f""Training in progress epoch {epoch}"", blocking=False, auto_lfs_prune=True ) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message=""End of training"", auto_lfs_prune=True) " 58658,"def determine_entity_for_token( token: Token, entities: List[Dict[Text, Any]], extractors: Optional[Set[Text]] = None, ) -> Optional[Dict[Text, Any]]: """""" Determines the best fitting entity for the given token given entities that do not overlap. Args: token: a single token entities: entities found by a single extractor extractors: list of extractors Returns: entity type """""" if entities is None or len(entities) == 0: return None if not do_extractors_support_overlap(extractors) and do_entities_overlap(entities): raise ValueError(""The possible entities should not overlap."") candidates = find_intersecting_entities(token, entities) return pick_best_entity_fit(token, candidates) ","def determine_entity_for_token( token: Token, entities: List[Dict[Text, Any]], extractors: Optional[Set[Text]] = None, ) -> Optional[Dict[Text, Any]]: """""" Determines the best fitting entity for the given token given entities that do not overlap. 
Args: token: a single token entities: entities found by a single extractor extractors: list of extractors Returns: entity type """""" if entities is None or len(entities) == 0: return None if not do_extractors_support_overlap(extractors) and do_entities_overlap(entities): raise ValueError(""The possible entities should not overlap."") candidates = find_intersecting_entities(token, entities) return pick_best_entity_fit(token, candidates) " 23034,"def optimize_read_parquet_getitem(dsk): # find the keys to optimze from .io.parquet.core import ParquetSubgraph read_parquets = [k for k, v in dsk.layers.items() if isinstance(v, ParquetSubgraph)] layers_dict = dsk.layers.copy() for k in read_parquets: columns = set() for dep in dsk.dependents[k]: block = dsk.layers[dep] if not isinstance(block, Blockwise): return dsk if len(block.dsk) != 1: # TODO return dsk if list(block.dsk.values())[0][0] != operator.getitem: return dsk block_columns = block.indices[1][0] # like ('B', None) if isinstance(block_columns, str): block_columns = [block_columns] block_columns = set(block_columns) columns |= block_columns old = layers_dict[k] if columns: columns = list(columns) meta = old.meta[columns] else: meta = old.meta columns = list(meta.columns) new = ParquetSubgraph( old.name, old.engine, old.fs, meta, columns, old.index, old.parts, old.kwargs, ) layers_dict[k] = new return HighLevelGraph(layers_dict, dsk.dependencies) ","def optimize_read_parquet_getitem(dsk): # find the keys to optimze from .io.parquet.core import ParquetSubgraph read_parquets = [k for k, v in dsk.layers.items() if isinstance(v, ParquetSubgraph)] layers_dict = dsk.layers.copy() for k in read_parquets: columns = set() for dep in dsk.dependents[k]: block = dsk.layers[dep] if not isinstance(block, Blockwise): return dsk if len(block.dsk) != 1: # TODO return dsk if list(block.dsk.values())[0][0] != operator.getitem: return dsk block_columns = block.indices[1][0] # like ('B', None) if isinstance(block_columns, str): block_columns = [block_columns] block_columns = set(block_columns) columns |= set(block_columns) old = layers_dict[k] if columns: columns = list(columns) meta = old.meta[columns] else: meta = old.meta columns = list(meta.columns) new = ParquetSubgraph( old.name, old.engine, old.fs, meta, columns, old.index, old.parts, old.kwargs, ) layers_dict[k] = new return HighLevelGraph(layers_dict, dsk.dependencies) " 8946,"def shutdown(bot): """"""Clean-up memory after the plugin."""""" try: bot.memory['join_events_queue'].clear() except KeyError: pass ","def shutdown(bot): """"""Clean up coretasks-related values in the bot's memory."""""" try: bot.memory['join_events_queue'].clear() except KeyError: pass " 25675,"def default_summary_fmt(): """"""Summary printing format."""""" return config().summary_fmt ","def default_summary_fmt(): """"""Summary printing format as understood by :mod:`tabulate` or a special case ""notebook""."""""" return config().summary_fmt " 3313,"def _get_constrained_date_range(params, allow_minute_resolution=False): interval = parse_stats_period(params.get(""interval"", ""1h"")) interval = int(3600 if interval is None else interval.total_seconds()) smallest_interval = ONE_MINUTE if allow_minute_resolution else ONE_HOUR if interval % smallest_interval != 0 or interval < smallest_interval: interval_str = ""one minute"" if allow_minute_resolution else ""one hour"" raise InvalidParams( f""The interval has to be a multiple of the minimum interval of {interval_str}."" ) if interval > ONE_DAY: raise InvalidParams(""The interval 
has to be less than one day."") if ONE_DAY % interval != 0: raise InvalidParams(""The interval should divide one day cleanly."") using_minute_resolution = interval % ONE_HOUR != 0 start, end = get_date_range_from_params(params) # if `end` is explicitly given, we add a second to it, so it is treated as # inclusive. the rounding logic down below will take care of the rest. if params.get(""end""): end += timedelta(seconds=1) date_range = end - start # round the range up to a multiple of the interval. # the minimum is 1h so the ""totals"" will not go out of sync, as they will # use the materialized storage due to no grouping on the `started` column. rounding_interval = int(math.ceil(interval / ONE_HOUR) * ONE_HOUR) date_range = timedelta( seconds=int(rounding_interval * math.ceil(date_range.total_seconds() / rounding_interval)) ) if using_minute_resolution: if date_range.total_seconds() > 6 * ONE_HOUR: raise InvalidParams( ""The time-range when using one-minute resolution intervals is restricted to 6 hours."" ) if (datetime.now(tz=pytz.utc) - start).total_seconds() > 30 * ONE_DAY: raise InvalidParams( ""The time-range when using one-minute resolution intervals is restricted to the last 30 days."" ) if date_range.total_seconds() / interval > MAX_POINTS: raise InvalidParams( ""Your interval and date range would create too many results. "" ""Use a larger interval, or a smaller date range."" ) end_ts = int(rounding_interval * math.ceil(to_timestamp(end) / rounding_interval)) end = to_datetime(end_ts) # when expanding the rounding interval, we would adjust the end time too far # to the future, in which case the start time would not actually contain our # desired date range. adjust for this by extend the time by another interval. # for example, when ""45m"" means the range from 08:49:00-09:34:00, our rounding # has to go from 08:00:00 to 10:00:00. if rounding_interval > interval and (end - date_range) > start: date_range += timedelta(seconds=rounding_interval) start = end - date_range return start, end, interval ","def _get_constrained_date_range(params, allow_minute_resolution=False): interval = parse_stats_period(params.get(""interval"", ""1h"")) interval = int(3600 if interval is None else interval.total_seconds()) smallest_interval = ONE_MINUTE if allow_minute_resolution else ONE_HOUR if interval % smallest_interval != 0 or interval < smallest_interval: interval_str = ""one minute"" if allow_minute_resolution else ""one hour"" raise InvalidParams( f""The interval has to be a multiple of the minimum interval of {interval_str}."" ) if interval > ONE_DAY: raise InvalidParams(""The interval has to be less than one day."") if ONE_DAY % interval != 0: raise InvalidParams(""The interval should divide one day without a remainder."") using_minute_resolution = interval % ONE_HOUR != 0 start, end = get_date_range_from_params(params) # if `end` is explicitly given, we add a second to it, so it is treated as # inclusive. the rounding logic down below will take care of the rest. if params.get(""end""): end += timedelta(seconds=1) date_range = end - start # round the range up to a multiple of the interval. # the minimum is 1h so the ""totals"" will not go out of sync, as they will # use the materialized storage due to no grouping on the `started` column. 
rounding_interval = int(math.ceil(interval / ONE_HOUR) * ONE_HOUR) date_range = timedelta( seconds=int(rounding_interval * math.ceil(date_range.total_seconds() / rounding_interval)) ) if using_minute_resolution: if date_range.total_seconds() > 6 * ONE_HOUR: raise InvalidParams( ""The time-range when using one-minute resolution intervals is restricted to 6 hours."" ) if (datetime.now(tz=pytz.utc) - start).total_seconds() > 30 * ONE_DAY: raise InvalidParams( ""The time-range when using one-minute resolution intervals is restricted to the last 30 days."" ) if date_range.total_seconds() / interval > MAX_POINTS: raise InvalidParams( ""Your interval and date range would create too many results. "" ""Use a larger interval, or a smaller date range."" ) end_ts = int(rounding_interval * math.ceil(to_timestamp(end) / rounding_interval)) end = to_datetime(end_ts) # when expanding the rounding interval, we would adjust the end time too far # to the future, in which case the start time would not actually contain our # desired date range. adjust for this by extend the time by another interval. # for example, when ""45m"" means the range from 08:49:00-09:34:00, our rounding # has to go from 08:00:00 to 10:00:00. if rounding_interval > interval and (end - date_range) > start: date_range += timedelta(seconds=rounding_interval) start = end - date_range return start, end, interval " 56169,"def test_cli_get_config(): config = get_config( ""test/fixtures/config/toml/pyproject.toml"", True, nocolor=False, verbose=0, ) assert config.get(""nocolor"") is True assert config.get(""verbose"") == 2 ","def test_cli_get_config(): config = get_config( ""test/fixtures/config/toml/pyproject.toml"", True, nocolor=None, verbose=None, ) assert config.get(""nocolor"") is True assert config.get(""verbose"") == 2 " 8774,"def is_triggerable(obj): """"""Check if ``obj`` can handle the bot's triggers. :param obj: any :term:`function` to check. :return: ``True`` if ``obj`` can handle the bot's triggers. A triggerable is a callable that will be used by the bot to handle a particular trigger (i.e. an IRC message): it can be a regex rule, an event, an intent, a command or nickname command. However, it must not be a job or an URL callback. .. seealso:: The :mod:`sopel.module` defines decorators to make a function a triggerable object. """""" forbidden = ( 'interval', 'url_regex', ) must_not = not any(hasattr(obj, attr) for attr in forbidden) allowed = ( 'rule', 'event', 'intents', 'commands', 'nickname_commands', ) return must_not and any(hasattr(obj, attr) for attr in allowed) ","def is_triggerable(obj): """"""Check if ``obj`` can handle the bot's triggers. :param obj: any :term:`function` to check. :return: ``True`` if ``obj`` can handle the bot's triggers. A triggerable is a callable that will be used by the bot to handle a particular trigger (i.e. an IRC message): it can be a regex rule, an event, an intent, a command or nickname command. However, it must not be a job or an URL callback. .. seealso:: Many of the decorators defined in :mod:`sopel.module` make the decorated function a triggerable object. 
"""""" forbidden = ( 'interval', 'url_regex', ) must_not = not any(hasattr(obj, attr) for attr in forbidden) allowed = ( 'rule', 'event', 'intents', 'commands', 'nickname_commands', ) return must_not and any(hasattr(obj, attr) for attr in allowed) " 19935,"def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), objectstore=dict(type='str', required=False, choices=[ 'bluestore', 'filestore'], default='bluestore'), action=dict(type='str', required=False, choices=[ 'create', 'zap', 'batch', 'prepare', 'activate', 'list'], default='create'), # noqa 4502 data=dict(type='str', required=False), data_vg=dict(type='str', required=False), journal=dict(type='str', required=False), journal_vg=dict(type='str', required=False), db=dict(type='str', required=False), db_vg=dict(type='str', required=False), wal=dict(type='str', required=False), wal_vg=dict(type='str', required=False), crush_device_class=dict(type='str', required=False), dmcrypt=dict(type='bool', required=False, default=False), batch_devices=dict(type='list', required=False, default=[]), osds_per_device=dict(type='int', required=False, default=1), journal_size=dict(type='str', required=False, default='5120'), block_db_size=dict(type='str', required=False, default='-1'), report=dict(type='bool', required=False, default=False), containerized=dict(type='str', required=False, default=False), ) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) result = dict( changed=False, stdout='', stderr='', rc='', start='', end='', delta='', ) if module.check_mode: return result # start execution startd = datetime.datetime.now() # get the desired action action = module.params['action'] # will return either the image name or None container_image = is_containerized() # Assume the task's status will be 'changed' changed = True if action == 'create' or action == 'prepare': # First test if the device has Ceph LVM Metadata rc, cmd, out, err = exec_command( module, list_osd(module, container_image)) # list_osd returns a dict, if the dict is empty this means # we can not check the return code since it's not consistent # with the plain output # see: http://tracker.ceph.com/issues/36329 # FIXME: it's probably less confusing to check for rc # convert out to json, ansible returns a string... 
try: out_dict = json.loads(out) except ValueError: fatal(""Could not decode json output: {} from the command {}"".format(out, cmd), module) # noqa E501 if out_dict: data = module.params['data'] result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501 data) result['rc'] = 0 module.exit_json(**result) # Prepare or create the OSD rc, cmd, out, err = exec_command( module, prepare_or_create_osd(module, action, container_image)) elif action == 'activate': if container_image: fatal( ""This is not how container's activation happens, nothing to activate"", module) # noqa E501 # Activate the OSD rc, cmd, out, err = exec_command( module, activate_osd()) elif action == 'zap': # Zap the OSD rc, cmd, out, err = exec_command( module, zap_devices(module, container_image)) elif action == 'list': # List Ceph LVM Metadata on a device rc, cmd, out, err = exec_command( module, list_osd(module, container_image)) elif action == 'batch': # Batch prepare AND activate OSDs report = module.params.get('report', None) # Add --report flag for the idempotency test report_flags = [ '--report', '--format=json', ] cmd = batch(module, container_image) batch_report_cmd = copy.copy(cmd) batch_report_cmd.extend(report_flags) # Run batch --report to see what's going to happen # Do not run the batch command if there is nothing to do rc, cmd, out, err = exec_command( module, batch_report_cmd) try: report_result = json.loads(out) except ValueError: strategy_change = ""strategy changed"" in out if strategy_change: out = json.dumps({""changed"": False, ""stdout"": out.rstrip(b""\r\n"")}) rc = 0 changed = False else: out = out.rstrip(b""\r\n"") result = dict( cmd=cmd, stdout=out, stderr=err.rstrip(b""\r\n""), rc=rc, changed=changed, ) if strategy_change: module.exit_json(**result) module.fail_json(msg='non-zero return code', **result) if not report: # if not asking for a report, let's just run the batch command changed = report_result['changed'] if changed: # Batch prepare the OSD rc, cmd, out, err = exec_command( module, batch(module, container_image)) else: cmd = batch_report_cmd else: module.fail_json( msg='State must either be ""create"" or ""prepare"" or ""activate"" or ""list"" or ""zap"" or ""batch"".', changed=False, rc=1) # noqa E501 endd = datetime.datetime.now() delta = endd - startd result = dict( cmd=cmd, start=str(startd), end=str(endd), delta=str(delta), rc=rc, stdout=out.rstrip(b'\r\n'), stderr=err.rstrip(b'\r\n'), changed=changed, ) if rc != 0: module.fail_json(msg='non-zero return code', **result) module.exit_json(**result) ","def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), objectstore=dict(type='str', required=False, choices=[ 'bluestore', 'filestore'], default='bluestore'), action=dict(type='str', required=False, choices=[ 'create', 'zap', 'batch', 'prepare', 'activate', 'list'], default='create'), # noqa 4502 data=dict(type='str', required=False), data_vg=dict(type='str', required=False), journal=dict(type='str', required=False), journal_vg=dict(type='str', required=False), db=dict(type='str', required=False), db_vg=dict(type='str', required=False), wal=dict(type='str', required=False), wal_vg=dict(type='str', required=False), crush_device_class=dict(type='str', required=False), dmcrypt=dict(type='bool', required=False, default=False), batch_devices=dict(type='list', required=False, default=[]), osds_per_device=dict(type='int', required=False, default=1), journal_size=dict(type='str', required=False, default='5120'), 
block_db_size=dict(type='str', required=False, default='-1'), report=dict(type='bool', required=False, default=False), containerized=dict(type='str', required=False, default=False), ) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) result = dict( changed=False, stdout='', stderr='', rc='', start='', end='', delta='', ) if module.check_mode: return result # start execution startd = datetime.datetime.now() # get the desired action action = module.params['action'] # will return either the image name or None container_image = is_containerized() # Assume the task's status will be 'changed' changed = True if action == 'create' or action == 'prepare': # First test if the device has Ceph LVM Metadata rc, cmd, out, err = exec_command( module, list_osd(module, container_image)) # list_osd returns a dict, if the dict is empty this means # we can not check the return code since it's not consistent # with the plain output # see: http://tracker.ceph.com/issues/36329 # FIXME: it's probably less confusing to check for rc # convert out to json, ansible returns a string... try: out_dict = json.loads(out) except ValueError: fatal(""Could not decode json output: {} from the command {}"".format(out, cmd), module) # noqa E501 if out_dict: data = module.params['data'] result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501 data) result['rc'] = 0 module.exit_json(**result) # Prepare or create the OSD rc, cmd, out, err = exec_command( module, prepare_or_create_osd(module, action, container_image)) elif action == 'activate': if container_image: fatal( ""This is not how container's activation happens, nothing to activate"", module) # noqa E501 # Activate the OSD rc, cmd, out, err = exec_command( module, activate_osd()) elif action == 'zap': # Zap the OSD rc, cmd, out, err = exec_command( module, zap_devices(module, container_image)) elif action == 'list': # List Ceph LVM Metadata on a device rc, cmd, out, err = exec_command( module, list_osd(module, container_image)) elif action == 'batch': # Batch prepare AND activate OSDs report = module.params.get('report', None) # Add --report flag for the idempotency test report_flags = [ '--report', '--format=json', ] cmd = batch(module, container_image) batch_report_cmd = copy.copy(cmd) batch_report_cmd.extend(report_flags) # Run batch --report to see what's going to happen # Do not run the batch command if there is nothing to do rc, cmd, out, err = exec_command( module, batch_report_cmd) try: report_result = json.loads(out) except ValueError: strategy_change = ""strategy changed"" in out if strategy_change: out = json.dumps({""changed"": False, ""stdout"": out.rstrip(""\r\n"")}) rc = 0 changed = False else: out = out.rstrip(b""\r\n"") result = dict( cmd=cmd, stdout=out, stderr=err.rstrip(b""\r\n""), rc=rc, changed=changed, ) if strategy_change: module.exit_json(**result) module.fail_json(msg='non-zero return code', **result) if not report: # if not asking for a report, let's just run the batch command changed = report_result['changed'] if changed: # Batch prepare the OSD rc, cmd, out, err = exec_command( module, batch(module, container_image)) else: cmd = batch_report_cmd else: module.fail_json( msg='State must either be ""create"" or ""prepare"" or ""activate"" or ""list"" or ""zap"" or ""batch"".', changed=False, rc=1) # noqa E501 endd = datetime.datetime.now() delta = endd - startd result = dict( cmd=cmd, start=str(startd), end=str(endd), delta=str(delta), rc=rc, stdout=out.rstrip(b'\r\n'), 
stderr=err.rstrip(b'\r\n'), changed=changed, ) if rc != 0: module.fail_json(msg='non-zero return code', **result) module.exit_json(**result) " 14697,"def setup(hass, config): """"""Set up the Netatmo devices."""""" import pyatmo hass.data[DATA_PERSONS] = {} try: conf = pyatmo.ClientAuth( config[DOMAIN][CONF_API_KEY], config[DOMAIN][CONF_SECRET_KEY], config[DOMAIN][CONF_USERNAME], config[DOMAIN][CONF_PASSWORD], 'read_station read_camera access_camera ' 'read_thermostat write_thermostat ' 'read_presence access_presence read_homecoach') except HTTPError: _LOGGER.error(""Unable to connect to Netatmo API"") return False if config[DOMAIN][CONF_DISCOVERY]: for component in 'camera', 'sensor', 'binary_sensor', 'climate': discovery.load_platform(hass, component, DOMAIN, {}, config) if config[DOMAIN][CONF_WEBHOOKS]: webhook_id = hass.components.webhook.async_generate_id() hass.data[ DATA_WEBHOOK_URL] = hass.components.webhook.async_generate_url( webhook_id) hass.components.webhook.async_register( DOMAIN, 'Netatmo', webhook_id, handle_webhook) conf.addwebhook(hass.data[DATA_WEBHOOK_URL]) hass.bus.listen_once( EVENT_HOMEASSISTANT_STOP, dropwebhook) def _service_addwebhook(service): """"""Service to (re)add webhooks during runtime."""""" url = service.data.get(CONF_URL) if url is None: url = hass.data[DATA_WEBHOOK_URL] _LOGGER.info(""Adding webhook for URL: %s"", url) conf.addwebhook(url) hass.services.register( DOMAIN, SERVICE_ADDWEBHOOK, _service_addwebhook, schema=SCHEMA_SERVICE_ADDWEBHOOK) def _service_dropwebhook(service): """"""Service to drop webhooks during runtime."""""" _LOGGER.info(""Dropping webhook"") conf.dropwebhook() hass.services.register( DOMAIN, SERVICE_DROPWEBHOOK, _service_dropwebhook, schema=SCHEMA_SERVICE_DROPWEBHOOK) # Store config to be used during entry setup hass.data[DATA_NETATMO_CONFIG] = conf return True ","def setup(hass, config): """"""Set up the Netatmo devices."""""" import pyatmo hass.data[DATA_PERSONS] = {} try: conf = pyatmo.ClientAuth( config[DOMAIN][CONF_API_KEY], config[DOMAIN][CONF_SECRET_KEY], config[DOMAIN][CONF_USERNAME], config[DOMAIN][CONF_PASSWORD], 'read_station read_camera access_camera ' 'read_thermostat write_thermostat ' 'read_presence access_presence read_homecoach') except HTTPError: _LOGGER.error(""Unable to connect to Netatmo API"") return False if config[DOMAIN][CONF_DISCOVERY]: for component in 'camera', 'sensor', 'binary_sensor', 'climate': discovery.load_platform(hass, component, DOMAIN, {}, config) if config[DOMAIN][CONF_WEBHOOKS]: webhook_id = hass.components.webhook.async_generate_id() hass.data[ DATA_WEBHOOK_URL] = hass.components.webhook.async_generate_url( webhook_id) hass.components.webhook.async_register( DOMAIN, 'Netatmo', webhook_id, handle_webhook) conf.addwebhook(hass.data[DATA_WEBHOOK_URL]) hass.bus.listen_once( EVENT_HOMEASSISTANT_STOP, dropwebhook) def _service_addwebhook(service): """"""Service to (re)add webhooks during runtime."""""" url = service.data.get(CONF_URL) if url is None: url = hass.data[DATA_WEBHOOK_URL] _LOGGER.info(""Adding webhook for URL: %s"", url) conf.addwebhook(url) hass.services.register( DOMAIN, SERVICE_ADDWEBHOOK, _service_addwebhook, schema=SCHEMA_SERVICE_ADDWEBHOOK) def _service_dropwebhook(service): """"""Service to drop webhooks during runtime."""""" _LOGGER.info(""Dropping webhook"") conf.dropwebhook() hass.services.register( DOMAIN, SERVICE_DROPWEBHOOK, _service_dropwebhook, schema=SCHEMA_SERVICE_DROPWEBHOOK) # Store config to be used during entry setup hass.data[DATA_NETATMO_AUTH] = auth return 
True " 9784,"def main(): argument_spec = vmware_argument_spec() argument_spec.update(datacenter=dict(type=""str"", default=""ha-datacenter""), cluster_name=dict(type=""str""), folder=dict(type=""str"", default=""/vm""), name=dict(type=""str"", required=True), esxi_hostname=dict(type=""str""), path=dict(type=""str""), template=dict(type=""bool"", default=False), resource_pool=dict(type=""str""), state=dict(type=""str"", default=""present"", cohices=[""present"", ""absent""])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) vmware_guest_register_operation = VMwareGuestRegisterOperation(module) vmware_guest_register_operation.execute() ","def main(): argument_spec = vmware_argument_spec() argument_spec.update(datacenter=dict(type=""str"", default=""ha-datacenter""), cluster_name=dict(type=""str""), folder=dict(type=""str""), name=dict(type=""str"", required=True), esxi_hostname=dict(type=""str""), path=dict(type=""str""), template=dict(type=""bool"", default=False), resource_pool=dict(type=""str""), state=dict(type=""str"", default=""present"", cohices=[""present"", ""absent""])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) vmware_guest_register_operation = VMwareGuestRegisterOperation(module) vmware_guest_register_operation.execute() " 35654,"def get_args_parser(add_help=True): import argparse parser = argparse.ArgumentParser(description=""PyTorch Classification Training"", add_help=add_help) parser.add_argument(""--data-path"", default=""/datasets01/imagenet_full_size/061417/"", type=str, help=""dataset path"") parser.add_argument(""--model"", default=""resnet18"", type=str, help=""model name"") parser.add_argument(""--device"", default=""cuda"", type=str, help=""device (cuda for GPU use, cpu for not using GPU"") parser.add_argument(""-b"", ""--batch-size"", default=32, type=int) parser.add_argument(""--epochs"", default=90, type=int, metavar=""N"", help=""number of total epochs to run"") parser.add_argument( ""-j"", ""--workers"", default=16, type=int, metavar=""N"", help=""number of data loading workers (default: 16)"" ) parser.add_argument(""--opt"", default=""sgd"", type=str, help=""optimizer"") parser.add_argument(""--lr"", default=0.1, type=float, help=""initial learning rate"") parser.add_argument(""--momentum"", default=0.9, type=float, metavar=""M"", help=""momentum"") parser.add_argument( ""--wd"", ""--weight-decay"", default=1e-4, type=float, metavar=""W"", help=""weight decay (default: 1e-4)"", dest=""weight_decay"", ) parser.add_argument( ""--norm-weight-decay"", default=None, type=float, help=""weight decay for Normalization layers (default: None, same value as --wd)"", ) parser.add_argument( ""--label-smoothing"", default=0.0, type=float, help=""label smoothing (default: 0.0)"", dest=""label_smoothing"" ) parser.add_argument(""--mixup-alpha"", default=0.0, type=float, help=""mixup alpha (default: 0.0)"") parser.add_argument(""--cutmix-alpha"", default=0.0, type=float, help=""cutmix alpha (default: 0.0)"") parser.add_argument(""--lr-scheduler"", default=""steplr"", type=str, help=""the lr scheduler (default: steplr)"") parser.add_argument(""--lr-warmup-epochs"", default=0, type=int, help=""the number of epochs to warmup (default: 0)"") parser.add_argument( ""--lr-warmup-method"", default=""constant"", type=str, help=""the warmup method (default: constant)"" ) parser.add_argument(""--lr-warmup-decay"", default=0.01, type=float, help=""the decay for lr"") parser.add_argument(""--lr-step-size"", default=30, type=int, 
help=""decrease lr every step-size epochs"") parser.add_argument(""--lr-gamma"", default=0.1, type=float, help=""decrease lr by a factor of lr-gamma"") parser.add_argument(""--print-freq"", default=10, type=int, help=""print frequency"") parser.add_argument(""--output-dir"", default=""."", type=str, help=""path to save outputs"") parser.add_argument(""--resume"", default="""", type=str, help=""path for resume checkpoint from"") parser.add_argument(""--start-epoch"", default=0, type=int, metavar=""N"", help=""start epoch"") parser.add_argument( ""--cache-dataset"", dest=""cache_dataset"", help=""Cache the datasets for quicker initialization. It also serializes the transforms"", action=""store_true"", ) parser.add_argument( ""--sync-bn"", dest=""sync_bn"", help=""Use sync batch norm"", action=""store_true"", ) parser.add_argument( ""--test-only"", dest=""test_only"", help=""Only test the model"", action=""store_true"", ) parser.add_argument( ""--pretrained"", dest=""pretrained"", help=""Use pre-trained models from the modelzoo"", action=""store_true"", ) parser.add_argument(""--auto-augment"", default=None, type=str, help=""auto augment policy (default: None)"") parser.add_argument(""--random-erase"", default=0.0, type=float, help=""random erasing probability (default: 0.0)"") # Mixed precision training parameters parser.add_argument(""--amp"", action=""store_true"", help=""Use torch.cuda.amp for mixed precision training"") # distributed training parameters parser.add_argument(""--world-size"", default=1, type=int, help=""number of distributed processes"") parser.add_argument(""--dist-url"", default=""env://"", type=str, help=""url used to set up distributed training"") parser.add_argument( ""--model-ema"", action=""store_true"", help=""enable tracking Exponential Moving Average of model parameters"" ) parser.add_argument( ""--model-ema-steps"", type=int, default=32, help=""the number of iterations that controls how often to update the EMA model (default: 32)"", ) parser.add_argument( ""--model-ema-decay"", type=float, default=0.99998, help=""decay factor for Exponential Moving Average of model parameters (default: 0.99998)"", ) parser.add_argument( ""--use-deterministic-algorithms"", action=""store_true"", help=""Forces the use of deterministic algorithms only."" ) parser.add_argument( ""--interpolation"", default=""bilinear"", type=str, help=""the interpolation method (default: bilinear)"" ) parser.add_argument( ""--val-resize-size"", default=256, type=int, help=""the resize size used for validation (default: 256)"" ) parser.add_argument( ""--val-crop-size"", default=224, type=int, help=""the central crop size used for validation (default: 224)"" ) parser.add_argument( ""--train-crop-size"", default=224, type=int, help=""the random crop size used for training (default: 224)"" ) return parser ","def get_args_parser(add_help=True): import argparse parser = argparse.ArgumentParser(description=""PyTorch Classification Training"", add_help=add_help) parser.add_argument(""--data-path"", default=""/datasets01/imagenet_full_size/061417/"", type=str, help=""dataset path"") parser.add_argument(""--model"", default=""resnet18"", type=str, help=""model name"") parser.add_argument(""--device"", default=""cuda"", type=str, help=""device (Use cuda or cpu Default: cuda"") parser.add_argument(""-b"", ""--batch-size"", default=32, type=int) parser.add_argument(""--epochs"", default=90, type=int, metavar=""N"", help=""number of total epochs to run"") parser.add_argument( ""-j"", ""--workers"", default=16, 
type=int, metavar=""N"", help=""number of data loading workers (default: 16)"" ) parser.add_argument(""--opt"", default=""sgd"", type=str, help=""optimizer"") parser.add_argument(""--lr"", default=0.1, type=float, help=""initial learning rate"") parser.add_argument(""--momentum"", default=0.9, type=float, metavar=""M"", help=""momentum"") parser.add_argument( ""--wd"", ""--weight-decay"", default=1e-4, type=float, metavar=""W"", help=""weight decay (default: 1e-4)"", dest=""weight_decay"", ) parser.add_argument( ""--norm-weight-decay"", default=None, type=float, help=""weight decay for Normalization layers (default: None, same value as --wd)"", ) parser.add_argument( ""--label-smoothing"", default=0.0, type=float, help=""label smoothing (default: 0.0)"", dest=""label_smoothing"" ) parser.add_argument(""--mixup-alpha"", default=0.0, type=float, help=""mixup alpha (default: 0.0)"") parser.add_argument(""--cutmix-alpha"", default=0.0, type=float, help=""cutmix alpha (default: 0.0)"") parser.add_argument(""--lr-scheduler"", default=""steplr"", type=str, help=""the lr scheduler (default: steplr)"") parser.add_argument(""--lr-warmup-epochs"", default=0, type=int, help=""the number of epochs to warmup (default: 0)"") parser.add_argument( ""--lr-warmup-method"", default=""constant"", type=str, help=""the warmup method (default: constant)"" ) parser.add_argument(""--lr-warmup-decay"", default=0.01, type=float, help=""the decay for lr"") parser.add_argument(""--lr-step-size"", default=30, type=int, help=""decrease lr every step-size epochs"") parser.add_argument(""--lr-gamma"", default=0.1, type=float, help=""decrease lr by a factor of lr-gamma"") parser.add_argument(""--print-freq"", default=10, type=int, help=""print frequency"") parser.add_argument(""--output-dir"", default=""."", type=str, help=""path to save outputs"") parser.add_argument(""--resume"", default="""", type=str, help=""path for resume checkpoint from"") parser.add_argument(""--start-epoch"", default=0, type=int, metavar=""N"", help=""start epoch"") parser.add_argument( ""--cache-dataset"", dest=""cache_dataset"", help=""Cache the datasets for quicker initialization. 
It also serializes the transforms"", action=""store_true"", ) parser.add_argument( ""--sync-bn"", dest=""sync_bn"", help=""Use sync batch norm"", action=""store_true"", ) parser.add_argument( ""--test-only"", dest=""test_only"", help=""Only test the model"", action=""store_true"", ) parser.add_argument( ""--pretrained"", dest=""pretrained"", help=""Use pre-trained models from the modelzoo"", action=""store_true"", ) parser.add_argument(""--auto-augment"", default=None, type=str, help=""auto augment policy (default: None)"") parser.add_argument(""--random-erase"", default=0.0, type=float, help=""random erasing probability (default: 0.0)"") # Mixed precision training parameters parser.add_argument(""--amp"", action=""store_true"", help=""Use torch.cuda.amp for mixed precision training"") # distributed training parameters parser.add_argument(""--world-size"", default=1, type=int, help=""number of distributed processes"") parser.add_argument(""--dist-url"", default=""env://"", type=str, help=""url used to set up distributed training"") parser.add_argument( ""--model-ema"", action=""store_true"", help=""enable tracking Exponential Moving Average of model parameters"" ) parser.add_argument( ""--model-ema-steps"", type=int, default=32, help=""the number of iterations that controls how often to update the EMA model (default: 32)"", ) parser.add_argument( ""--model-ema-decay"", type=float, default=0.99998, help=""decay factor for Exponential Moving Average of model parameters (default: 0.99998)"", ) parser.add_argument( ""--use-deterministic-algorithms"", action=""store_true"", help=""Forces the use of deterministic algorithms only."" ) parser.add_argument( ""--interpolation"", default=""bilinear"", type=str, help=""the interpolation method (default: bilinear)"" ) parser.add_argument( ""--val-resize-size"", default=256, type=int, help=""the resize size used for validation (default: 256)"" ) parser.add_argument( ""--val-crop-size"", default=224, type=int, help=""the central crop size used for validation (default: 224)"" ) parser.add_argument( ""--train-crop-size"", default=224, type=int, help=""the random crop size used for training (default: 224)"" ) return parser " 5386,"def auth(profile=None, **connection_args): """""" Set up keystone credentials. Only intended to be used within Keystone-enabled modules. CLI Example: .. code-block:: bash salt '*' keystone.auth """""" __utils__[""versions.warn_until""]( ""Phosphorous"", ( ""The keystone module has been deprecated and will be removed in {version}. "" ""Please update to using the keystoneng module"" ), ) kwargs = _get_kwargs(profile=profile, **connection_args) disc = discover.Discover(auth_url=kwargs[""auth_url""]) v2_auth_url = disc.url_for(""v2.0"") v3_auth_url = disc.url_for(""v3.0"") if v3_auth_url: global _OS_IDENTITY_API_VERSION global _TENANTS _OS_IDENTITY_API_VERSION = 3 _TENANTS = ""projects"" kwargs[""auth_url""] = v3_auth_url else: kwargs[""auth_url""] = v2_auth_url kwargs.pop(""user_domain_name"") kwargs.pop(""project_domain_name"") auth = generic.Password(**kwargs) sess = session.Session(auth=auth) ks_cl = disc.create_client(session=sess) return ks_cl ","def auth(profile=None, **connection_args): """""" Set up keystone credentials. Only intended to be used within Keystone-enabled modules. CLI Example: .. code-block:: bash salt '*' keystone.auth """""" __utils__[""versions.warn_until""]( ""Phosphorus"", ( ""The keystone module has been deprecated and will be removed in {version}. 
"" ""Please update to using the keystoneng module"" ), ) kwargs = _get_kwargs(profile=profile, **connection_args) disc = discover.Discover(auth_url=kwargs[""auth_url""]) v2_auth_url = disc.url_for(""v2.0"") v3_auth_url = disc.url_for(""v3.0"") if v3_auth_url: global _OS_IDENTITY_API_VERSION global _TENANTS _OS_IDENTITY_API_VERSION = 3 _TENANTS = ""projects"" kwargs[""auth_url""] = v3_auth_url else: kwargs[""auth_url""] = v2_auth_url kwargs.pop(""user_domain_name"") kwargs.pop(""project_domain_name"") auth = generic.Password(**kwargs) sess = session.Session(auth=auth) ks_cl = disc.create_client(session=sess) return ks_cl " 30943,"def initialize_global_values(): global MAX_INCIDENTS_TO_FETCH, AUTH_URL, COOKIE, AUTH_HEADERS, QUERY_HEADERS, QUERY_URL,\ CLIENT_ID, CLIENT_SECRET, AUTH_HEADERS, DOMAIN, AUTHORIZATION CLIENT_ID = demisto.getParam('client_id') CLIENT_SECRET = demisto.getParam('client_secret') AUTH_URL = demisto.getParam('url') QUERY_URL = urljoin(demisto.getParam('url'), ""/graphql-demisto"") DOMAIN = demisto.getParam('domain') AUTHORIZATION = ""Basic "" + encoding(CLIENT_ID, CLIENT_SECRET) AUTH_HEADERS = get_headers_for_login() initialise_scrolls_and_rules() MAX_INCIDENTS_TO_FETCH = 300 ","def initialize_global_values(): global MAX_INCIDENTS_TO_FETCH, AUTH_URL, COOKIE, AUTH_HEADERS, QUERY_HEADERS, QUERY_URL,\ CLIENT_ID, CLIENT_SECRET, AUTH_HEADERS, DOMAIN, AUTHORIZATION CLIENT_ID = demisto.getParam('client_id') CLIENT_SECRET = demisto.getParam('client_secret') AUTH_URL = demisto.getParam('url') QUERY_URL = urljoin(demisto.getParam('url'), ""/graphql-demisto"") DOMAIN = demisto.getParam('domain') AUTHORIZATION = ""Basic "" + encoding(CLIENT_ID, CLIENT_SECRET) AUTH_HEADERS = get_headers_for_login() initialise_scrolls_and_rules() MAX_INCIDENTS_TO_FETCH = 200 " 7947,"def wwinp_to_wws(path): """"""Creates WeightWindows classes from a wwinp file Parameters ---------- path : str or pathlib.Path object Path to the wwinp file Returns ------- list of openmc.WeightWindows """""" # create generator for getting the next parameter from the file wwinp = __wwinp_reader(path) # first parameter, if, of wwinp file is unused next(wwinp) # check time parameter, iv if int(float(next(wwinp))) > 1: raise ValueError('Time-dependent weight windows ' 'are not yet supported.') # number of particle types, ni n_particle_types = int(float(next(wwinp))) # read an indicator of the mesh type. # this will be 10 if a rectilinear mesh # and 16 for cylindrical or spherical meshes mesh_chars = int(float(next(wwinp))) if mesh_chars != 10: # TODO: read the first entry by default and display a warning raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # read the number of energy groups for each particle, ne n_egroups = [int(next(wwinp)) for _ in range(n_particle_types)] # order that supported particle types will appear in the file particle_types = ['neutron', 'photon'] # add particle to list if at least one energy group is present particles = [p for e, p in zip(n_egroups, particle_types) if e > 0] # truncate list of energy groups if needed n_egroups = [e for e in n_egroups if e > 0] if n_particle_types > 2: msg = ('More than two particle types are present. 
' 'Only neutron and photon weight windows will be read.') warnings.warn(msg) # read total number of fine mesh elements in each coarse # element (nfx, nfy, nfz) n_fine_x = int(float(next(wwinp))) n_fine_y = int(float(next(wwinp))) n_fine_z = int(float(next(wwinp))) header_mesh_dims = (n_fine_x, n_fine_y, n_fine_z) # read the mesh origin: x0, y0, z0 llc = tuple(float(next(wwinp)) for _ in range(3)) # read the number of coarse mesh elements (ncx, ncy, ncz) n_coarse_x = int(float(next(wwinp))) n_coarse_y = int(float(next(wwinp))) n_coarse_z = int(float(next(wwinp))) # skip the value defining the geometry type, nwg, we already know this # 1 - rectilinear mesh # 2 - cylindrical mesh # 3 - spherical mesh mesh_type = int(float(next(wwinp))) if mesh_type != 1: # TODO: support additional mesh types raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # internal function for parsing mesh coordinates def _read_mesh_coords(wwinp, n_coarse_bins): coords = [float(next(wwinp))] for _ in range(n_coarse_bins): # number of fine mesh elements in this coarse element, sx sx = int(float(next(wwinp))) # value of next coordinate, px px = float(next(wwinp)) # fine mesh ratio, qx (currently unused) qx = next(wwinp) # append the fine mesh coordinates for this coarse element coords += list(np.linspace(coords[-1], px, sx + 1))[1:] return np.asarray(coords) # read the coordinates for each dimension into a rectilinear mesh mesh = RectilinearMesh() mesh.x_grid = _read_mesh_coords(wwinp, n_coarse_x) mesh.y_grid = _read_mesh_coords(wwinp, n_coarse_y) mesh.z_grid = _read_mesh_coords(wwinp, n_coarse_z) dims = ('x', 'y', 'z') # check consistency of mesh coordinates mesh_llc = mesh_val = (mesh.x_grid[0], mesh.y_grid[0], mesh.z_grid[0]) for dim, header_val, mesh_val in zip(dims, llc, mesh_llc): if header_val != mesh_val: msg = ('The {} corner of the mesh ({}) does not match ' 'the value read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, header_val)) # check total number of mesh elements in each direction mesh_dims = mesh.dimension for dim, header_val, mesh_val in zip(dims, header_mesh_dims, mesh_dims): if header_val != mesh_val: msg = ('Total number of mesh elements read in the {} ' 'direction ({}) is inconsistent with the ' 'number read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, header_val)) # read energy bins and weight window values for each particle wws = [] for particle, ne in zip(particles, n_egroups): # read upper energy bounds # it is implied that zero is always the first bound in MCNP e_bounds = np.asarray([0.0] + [float(next(wwinp)) for _ in range(ne)]) # adjust energy from MeV to eV e_bounds *= 1E6 # create an array for weight window lower bounds ww_lb = np.zeros((*mesh.dimension, ne)) for ijk in mesh.indices: # MCNP ordering for weight windows matches that of OpenMC # ('xyz' with x changing fastest) idx = tuple([v - 1 for v in ijk] + [slice(None)]) ww_lb[idx] = [float(next(wwinp)) for _ in range(ne)] # create a WeightWindows object and add it to the output list ww = WeightWindows(id=None, mesh=mesh, lower_ww_bounds=ww_lb.flatten(), upper_bound_ratio=5.0, energy_bounds=e_bounds, particle_type=particle) wws.append(ww) return wws ","def wwinp_to_wws(path): """"""Creates WeightWindows classes from a wwinp file Parameters ---------- path : str or pathlib.Path object Path to the wwinp file Returns ------- list of openmc.WeightWindows """""" # create generator for getting the next parameter from the file wwinp = 
__wwinp_reader(path) # first parameter, if, of wwinp file is unused next(wwinp) # check time parameter, iv if int(float(next(wwinp))) > 1: raise ValueError('Time-dependent weight windows ' 'are not yet supported.') # number of particle types, ni n_particle_types = int(float(next(wwinp))) # read an indicator of the mesh type. # this will be 10 if a rectilinear mesh # and 16 for cylindrical or spherical meshes mesh_chars = int(float(next(wwinp))) if mesh_chars != 10: # TODO: read the first entry by default and display a warning raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # read the number of energy groups for each particle, ne n_egroups = [int(next(wwinp)) for _ in range(n_particle_types)] # order that supported particle types will appear in the file particle_types = ['neutron', 'photon'] # add particle to list if at least one energy group is present particles = [p for e, p in zip(n_egroups, particle_types) if e > 0] # truncate list of energy groups if needed n_egroups = [e for e in n_egroups if e > 0] if n_particle_types > 2: msg = ('More than two particle types are present. ' 'Only neutron and photon weight windows will be read.') warnings.warn(msg) # read total number of fine mesh elements in each coarse # element (nfx, nfy, nfz) n_fine_x = int(float(next(wwinp))) n_fine_y = int(float(next(wwinp))) n_fine_z = int(float(next(wwinp))) header_mesh_dims = (n_fine_x, n_fine_y, n_fine_z) # read the mesh origin: x0, y0, z0 llc = tuple(float(next(wwinp)) for _ in range(3)) # read the number of coarse mesh elements (ncx, ncy, ncz) n_coarse_x = int(float(next(wwinp))) n_coarse_y = int(float(next(wwinp))) n_coarse_z = int(float(next(wwinp))) # skip the value defining the geometry type, nwg, we already know this # 1 - rectilinear mesh # 2 - cylindrical mesh # 3 - spherical mesh mesh_type = int(float(next(wwinp))) if mesh_type != 1: # TODO: support additional mesh types raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # internal function for parsing mesh coordinates def _read_mesh_coords(wwinp, n_coarse_bins): coords = [float(next(wwinp))] for _ in range(n_coarse_bins): # number of fine mesh elements in this coarse element, sx sx = int(float(next(wwinp))) # value of next coordinate, px px = float(next(wwinp)) # fine mesh ratio, qx (currently unused) qx = next(wwinp) # append the fine mesh coordinates for this coarse element coords += list(np.linspace(coords[-1], px, sx + 1))[1:] return np.asarray(coords) # read the coordinates for each dimension into a rectilinear mesh mesh = RectilinearMesh() mesh.x_grid = _read_mesh_coords(wwinp, n_coarse_x) mesh.y_grid = _read_mesh_coords(wwinp, n_coarse_y) mesh.z_grid = _read_mesh_coords(wwinp, n_coarse_z) dims = ('x', 'y', 'z') # check consistency of mesh coordinates mesh_llc = mesh_val = (mesh.x_grid[0], mesh.y_grid[0], mesh.z_grid[0]) for dim, header_val, mesh_val in zip(dims, llc, mesh_llc): if header_val != mesh_val: msg = ('The {} corner of the mesh ({}) does not match ' 'the value read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, header_val)) # check total number of mesh elements in each direction mesh_dims = mesh.dimension for dim, header_val, mesh_val in zip(dims, header_mesh_dims, mesh_dims): if header_val != mesh_val: msg = ('Total number of mesh elements read in the {} ' 'direction ({}) is inconsistent with the ' 'number read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, header_val)) # read 
energy bins and weight window values for each particle wws = [] for particle, ne in zip(particles, n_egroups): # read upper energy bounds # it is implied that zero is always the first bound in MCNP e_bounds = np.asarray([0.0] + [float(next(wwinp)) for _ in range(ne)]) # adjust energy from MeV to eV e_bounds *= 1e6 # create an array for weight window lower bounds ww_lb = np.zeros((*mesh.dimension, ne)) for ijk in mesh.indices: # MCNP ordering for weight windows matches that of OpenMC # ('xyz' with x changing fastest) idx = tuple([v - 1 for v in ijk] + [slice(None)]) ww_lb[idx] = [float(next(wwinp)) for _ in range(ne)] # create a WeightWindows object and add it to the output list ww = WeightWindows(id=None, mesh=mesh, lower_ww_bounds=ww_lb.flatten(), upper_bound_ratio=5.0, energy_bounds=e_bounds, particle_type=particle) wws.append(ww) return wws " 3708,"def assert_equal(actual, desired, fname): __tracebackhide__ = True # Hide traceback for py.test actual, desired = str(actual), str(desired) if actual == desired: return detected = str(__cpu_features__).replace(""'"", """") try: with open(""/proc/cpuinfo"", ""r"") as fd: cpuinfo = fd.read(2048) except Exception as err: cpuinfo = str(err) try: import subprocess auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV=""1"")) auxv = auxv.decode() except Exception as err: auxv = str(err) import textwrap error_report = textwrap.indent( """""" ########################################### ### Extra debugging information ########################################### ------------------------------------------- --- NumPy Detections ------------------------------------------- %s ------------------------------------------- --- SYS / CPUINFO ------------------------------------------- %s.... ------------------------------------------- --- SYS / AUXV ------------------------------------------- %s """""" % (detected, cpuinfo, auxv), prefix='\r') raise AssertionError(( ""Failure Detection\n"" "" NAME: '%s'\n"" "" ACTUAL: %s\n"" "" DESIRED: %s\n"" ""%s"" ) % (fname, actual, desired, error_report)) ","def assert_features_equal(actual, desired, fname): __tracebackhide__ = True # Hide traceback for py.test actual, desired = str(actual), str(desired) if actual == desired: return detected = str(__cpu_features__).replace(""'"", """") try: with open(""/proc/cpuinfo"", ""r"") as fd: cpuinfo = fd.read(2048) except Exception as err: cpuinfo = str(err) try: import subprocess auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV=""1"")) auxv = auxv.decode() except Exception as err: auxv = str(err) import textwrap error_report = textwrap.indent( """""" ########################################### ### Extra debugging information ########################################### ------------------------------------------- --- NumPy Detections ------------------------------------------- %s ------------------------------------------- --- SYS / CPUINFO ------------------------------------------- %s.... 
------------------------------------------- --- SYS / AUXV ------------------------------------------- %s """""" % (detected, cpuinfo, auxv), prefix='\r') raise AssertionError(( ""Failure Detection\n"" "" NAME: '%s'\n"" "" ACTUAL: %s\n"" "" DESIRED: %s\n"" ""%s"" ) % (fname, actual, desired, error_report)) " 56811,"def set_cleanliness_flags_for_all_domains(force_full=False): """""" Updates cleanliness for all domains """""" for domain in Domain.get_all(): if domain.use_livequery: continue try: set_cleanliness_flags_for_domain(domain.name, force_full=force_full) except InvalidDomainError as e: notify_exception(None, six.text_type(e)) ","def set_cleanliness_flags_for_all_domains(force_full=False): """""" Updates cleanliness for all domains """""" for domain_obj in Domain.get_all(): if domain_obj.use_livequery: continue try: set_cleanliness_flags_for_domain(domain.name, force_full=force_full) except InvalidDomainError as e: notify_exception(None, six.text_type(e)) " 57159,"def send_bulk_mail( sender_email: str, recipient_emails: List[str], subject: str, plaintext_body: str, html_body: str ) -> None: """"""Sends emails to all recipients in recipient_emails. In general this function should only be called from email_manager._send_bulk_mail(). Args: sender_email: str. The email address of the sender. This should be in the form 'SENDER_NAME ' or 'SENDER_EMAIL_ADDRESS'. Format must be utf-8. recipient_emails: list(str). List of the email addresses of recipients. Format must be utf-8. subject: str. The subject line of the email. Format must be utf-8. plaintext_body: str. The plaintext body of the email. Format must be utf-8. html_body: str. The HTML body of the email. Must fit in a datastore entity. Format must be utf-8. Raises: Exception. The configuration in feconf.py forbids emails from being sent. ValueError. Any recipient email addresses are malformed. ValueError. Any sender email address is malformed. Exception. The emails were not sent correctly. In other words, the send_email_to_recipients() function returned False (signifying API returned bad status code). """""" if not feconf.CAN_SEND_EMAILS: raise Exception('This app cannot send emails to users.') for recipient_email in recipient_emails: if not _is_email_valid(recipient_email): raise ValueError( 'Malformed recipient email address: %s' % recipient_email) if not _is_sender_email_valid(sender_email): raise ValueError( 'Malformed sender email address: %s' % sender_email) response = email_services.send_email_to_recipients( sender_email, recipient_emails, subject, plaintext_body, html_body) if not response: raise Exception( 'Bulk email failed to send. Please try again later or contact us ' + 'to report a bug at https://www.oppia.org/contact.') ","def send_bulk_mail( sender_email: str, recipient_emails: List[str], subject: str, plaintext_body: str, html_body: str ) -> None: """"""Sends emails to all recipients in recipient_emails. In general this function should only be called from email_manager._send_bulk_mail(). Args: sender_email: str. The email address of the sender. This should be in the form 'SENDER_NAME ' or 'SENDER_EMAIL_ADDRESS'. Format must be utf-8. recipient_emails: list(str). List of the email addresses of recipients. Format must be utf-8. subject: str. The subject line of the email. Format must be utf-8. plaintext_body: str. The plaintext body of the email. Format must be utf-8. html_body: str. The HTML body of the email. Must fit in a datastore entity. Format must be utf-8. Raises: Exception. 
The configuration in feconf.py forbids emails from being sent. ValueError. Any recipient email addresses are malformed. ValueError. Any sender email address is malformed. Exception. The emails were not sent correctly. In other words, the send_email_to_recipients() function returned False (signifying API returned bad status code). """""" if not feconf.CAN_SEND_EMAILS: raise Exception('This app cannot send emails to users.') for recipient_email in recipient_emails: if not _is_email_valid(recipient_email): raise ValueError( 'Malformed recipient email address: %s' % recipient_email) if not _is_sender_email_valid(sender_email): raise ValueError( 'Malformed sender email address: %s' % sender_email) response = email_services.send_email_to_recipients( sender_email, recipient_emails, subject, plaintext_body, html_body) if not response: raise Exception( 'Bulk email failed to send. Please try again later or contact us ' + 'to report a bug at https://www.oppia.org/contact.') " 7236,"def inertia_tensor_eigvals(image, mu=None, T=None): """"""Compute the eigenvalues of the inertia tensor of the image. The inertia tensor measures covariance of the image intensity along the image axes. (See `inertia_tensor`.) The relative magnitude of the eigenvalues of the tensor is thus a measure of the elongation of a (bright) object in the image. Parameters ---------- image : array The input image. mu : array, optional The pre-computed central moments of ``image``. T : array, shape ``(image.ndim, image.ndim)`` The pre-computed inertia tensor. If ``T`` is given, ``mu`` and ``image`` are ignored. Returns ------- eigvals : list of float, length ``image.ndim`` The eigenvalues of the inertia tensor of ``image``, in descending order. Notes ----- Computing the eigenvalues requires the inertia tensor of the input image. This is much faster if the central moments (``mu``) are provided, or, alternatively, one can provide the inertia tensor (``T``) directly. """""" if T is None: T = inertia_tensor(image, mu) eigvals = np.linalg.eigvalsh(T) """""" Floating point precision problems could make a positive semidefinite matrix have an eigenvalue that is very slightly negative. This can cause problems down the line, so set values very near zero to zero. """""" eigvals = np.where(abs(eigvals) < 1e-12, 0, eigvals) return sorted(eigvals, reverse=True) ","def inertia_tensor_eigvals(image, mu=None, T=None): """"""Compute the eigenvalues of the inertia tensor of the image. The inertia tensor measures covariance of the image intensity along the image axes. (See `inertia_tensor`.) The relative magnitude of the eigenvalues of the tensor is thus a measure of the elongation of a (bright) object in the image. Parameters ---------- image : array The input image. mu : array, optional The pre-computed central moments of ``image``. T : array, shape ``(image.ndim, image.ndim)`` The pre-computed inertia tensor. If ``T`` is given, ``mu`` and ``image`` are ignored. Returns ------- eigvals : list of float, length ``image.ndim`` The eigenvalues of the inertia tensor of ``image``, in descending order. Notes ----- Computing the eigenvalues requires the inertia tensor of the input image. This is much faster if the central moments (``mu``) are provided, or, alternatively, one can provide the inertia tensor (``T``) directly. """""" if T is None: T = inertia_tensor(image, mu) eigvals = np.linalg.eigvalsh(T) """""" Floating point precision problems could make a positive semidefinite matrix have an eigenvalue that is very slightly negative. 
This can cause problems down the line, so set values very near zero to zero. """""" eigvals = np.clip(eigvals, 0, None, out=eigvals) return sorted(eigvals, reverse=True) " 2493,"def test_gradient_boosting_early_stopping(): X, y = make_classification(n_samples=1000, random_state=0) gbc = GradientBoostingClassifier( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) gbr = GradientBoostingRegressor( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # Check if early_stopping works as expected for est, tol, early_stop_n_estimators in ( (gbc, 1e-1, 28), (gbr, 1e-1, 13), (gbc, 1e-3, 70), (gbr, 1e-3, 28), ): est.set_params(tol=tol) est.fit(X_train, y_train) assert est.n_estimators_ == early_stop_n_estimators assert est.score(X_test, y_test) > 0.7 # Without early stopping gbc = GradientBoostingClassifier( n_estimators=5, learning_rate=0.1, max_depth=3, random_state=42 ) gbc.fit(X, y) gbr = GradientBoostingRegressor( n_estimators=10, learning_rate=0.1, max_depth=3, random_state=42 ) gbr.fit(X, y) assert gbc.n_estimators_ == 5 assert gbr.n_estimators_ == 10 ","def test_gradient_boosting_early_stopping(): X, y = make_classification(n_samples=1000, random_state=0) gbc = GradientBoostingClassifier( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) gbr = GradientBoostingRegressor( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # Check if early_stopping works as expected for est, tol, early_stop_n_estimators in ( (gbc, 1e-1, 28), (gbr, 1e-1, 13), (gbc, 1e-3, 70), (gbr, 1e-3, 28), ): est.set_params(tol=tol) est.fit(X_train, y_train) assert est.n_estimators_ == early_stop_n_estimators assert est.score(X_test, y_test) > 0.7 # Without early stopping gbc = GradientBoostingClassifier( n_estimators=50, learning_rate=0.1, max_depth=3, random_state=42 ) gbc.fit(X, y) gbr = GradientBoostingRegressor( n_estimators=10, learning_rate=0.1, max_depth=3, random_state=42 ) gbr.fit(X, y) assert gbc.n_estimators_ == 5 assert gbr.n_estimators_ == 10 " 30663,"def auto_detect_indicator_type(indicator_value): """""" Infer the type of the indicator. :type indicator_value: ``str`` :param indicator_value: The indicator whose type we want to check. (required) :return: The type of the indicator. 
:rtype: ``str`` """""" try: import tldextract except Exception: raise Exception(""Missing tldextract module, In order to use the auto detect function please use a docker"" "" image with it installed such as: demisto/jmespath:1.0.0.6980"") if re.match(ipv4cidrRegex, indicator_value): return FeedIndicatorType.CIDR if re.match(ipv6cidrRegex, indicator_value): return FeedIndicatorType.IPv6CIDR if re.match(ipv4Regex, indicator_value): return FeedIndicatorType.IP if re.match(ipv6Regex, indicator_value): return FeedIndicatorType.IPv6 if re.match(sha256Regex, indicator_value): return FeedIndicatorType.File if re.match(urlRegex, indicator_value): return FeedIndicatorType.URL if re.match(md5Regex, indicator_value): return FeedIndicatorType.File if re.match(sha1Regex, indicator_value): return FeedIndicatorType.File if re.match(emailRegex, indicator_value): return FeedIndicatorType.Email if re.match(cveRegex, indicator_value): return FeedIndicatorType.CVE try: no_cache_extract = tldextract.TLDExtract(cache_file=False,suffix_list_urls=None) if no_cache_extract(indicator_value).suffix: if '*' in indicator_value: return FeedIndicatorType.DomainGlob return FeedIndicatorType.Domain except Exception: pass return None ","def auto_detect_indicator_type(indicator_value): """""" Infer the type of the indicator. :type indicator_value: ``str`` :param indicator_value: The indicator whose type we want to check. (required) :return: The type of the indicator. :rtype: ``str`` """""" try: import tldextract except Exception: raise Exception(""Missing tldextract module, In order to use the auto detect function please use a docker"" "" image with it installed such as the latest tag of: demisto/jmespath"") if re.match(ipv4cidrRegex, indicator_value): return FeedIndicatorType.CIDR if re.match(ipv6cidrRegex, indicator_value): return FeedIndicatorType.IPv6CIDR if re.match(ipv4Regex, indicator_value): return FeedIndicatorType.IP if re.match(ipv6Regex, indicator_value): return FeedIndicatorType.IPv6 if re.match(sha256Regex, indicator_value): return FeedIndicatorType.File if re.match(urlRegex, indicator_value): return FeedIndicatorType.URL if re.match(md5Regex, indicator_value): return FeedIndicatorType.File if re.match(sha1Regex, indicator_value): return FeedIndicatorType.File if re.match(emailRegex, indicator_value): return FeedIndicatorType.Email if re.match(cveRegex, indicator_value): return FeedIndicatorType.CVE try: no_cache_extract = tldextract.TLDExtract(cache_file=False,suffix_list_urls=None) if no_cache_extract(indicator_value).suffix: if '*' in indicator_value: return FeedIndicatorType.DomainGlob return FeedIndicatorType.Domain except Exception: pass return None " 56964,"def test_get_binary_sha256_hash(stable_binary, stable_binary_sha256_hash): assert get_binary_sha256_hash(stable_binary) == stable_binary_sha256_hash ","def test_get_binary_sha256_hash(): expected_hash = ""XXXX"" assert get_binary_sha256_hash(BytesIO(b'Hello World!')) == expected_hash " 50343,"def delete_record(record_uuid, reason, user): """"""Delete the record and it's PIDs. :param record_uuid: UUID of the record to be removed. :param reason: Reason for removal. Either one of: 'spam', 'uploader', 'takedown' (see 'ZENODO_REMOVAL_REASONS' variable in config), otherwise using it as a verbatim ""Reason"" string. :param user: ID or email of the Zenodo user (admin) responsible for the removal. 
"""""" from invenio_github.models import ReleaseStatus if isinstance(user, text_type): user_id = User.query.filter_by(email=user).one().id elif isinstance(user, int): user_id = User.query.get(user).id else: raise TypeError(""User cannot be determined from argument: {0}"".format( user)) record = ZenodoRecord.get_record(record_uuid) # Remove the record from versioning and delete the recid recid = PersistentIdentifier.get('recid', record['recid']) pv = PIDVersioning(child=recid) pv.remove_child(recid) pv.update_redirect() recid.delete() # Remove the record from index try: RecordIndexer().delete(record) except NotFoundError: pass # Remove buckets record_bucket = record.files.bucket RecordsBuckets.query.filter_by(record_id=record.id).delete() record_bucket.locked = False record_bucket.remove() removal_reasons = dict(current_app.config['ZENODO_REMOVAL_REASONS']) if reason in removal_reasons: reason = removal_reasons[reason] depid, deposit = deposit_resolver.resolve(record['_deposit']['id']) try: doi = PersistentIdentifier.get('doi', record['doi']) except PIDDoesNotExistError: doi = None # Record OpenAIRE info try: original_id = openaire_original_id(record, openaire_type(record))[1] datasource_id = openaire_datasource_id(record) except PIDDoesNotExistError: original_id = None datasource_id = None if pv.children.count() == 0: conceptrecid = PersistentIdentifier.get('recid', record['conceptrecid']) conceptrecid.delete() new_last_child = None else: new_last_child = (pv.last_child.pid_value, str(pv.last_child.object_uuid)) if 'conceptdoi' in record: conceptdoi_value = record['conceptdoi'] else: conceptdoi_value = None # Completely delete the deposit # Deposit will be removed from index deposit.delete(delete_published=True) # Clear the record and put the deletion information record.clear() record.update({ 'removal_reason': reason, 'removed_by': user_id, }) record.commit() # Mark the relevant GitHub Release as deleted for ghr in record.model.github_releases: ghr.status = ReleaseStatus.DELETED if not is_local_doi(doi.pid_value): db.session.delete(doi) db.session.commit() # After successful DB commit, sync the DOIs with DataCite if is_local_doi(doi.pid_value): datacite_inactivate.delay(doi.pid_value) if conceptdoi_value: if new_last_child: # Update last child (updates also conceptdoi) pid_value, rec_uuid = new_last_child datacite_register.delay(pid_value, rec_uuid) else: datacite_inactivate.delay(conceptdoi_value) # Also delete from OpenAIRE index if current_app.config['OPENAIRE_DIRECT_INDEXING_ENABLED'] and original_id \ and datasource_id: openaire_delete.delay(original_id=original_id, datasource_id=datasource_id) # Delete access requests as well as access links link_ids = [] secret_links = SecretLink.query.join(AccessRequest).filter( AccessRequest.recid == int(recid.pid_value) ) for i in secret_links: link_ids.append(i.id) with db.session.begin_nested(): AccessRequest.query.filter( \ AccessRequest.recid == int(recid.pid_value) \ ).delete() for s in link_ids: SecretLink.query.filter(SecretLink.id == s).delete() db.session.commit() ","def delete_record(record_uuid, reason, user): """"""Delete the record and it's PIDs. :param record_uuid: UUID of the record to be removed. :param reason: Reason for removal. Either one of: 'spam', 'uploader', 'takedown' (see 'ZENODO_REMOVAL_REASONS' variable in config), otherwise using it as a verbatim ""Reason"" string. :param user: ID or email of the Zenodo user (admin) responsible for the removal. 
"""""" from invenio_github.models import ReleaseStatus if isinstance(user, text_type): user_id = User.query.filter_by(email=user).one().id elif isinstance(user, int): user_id = User.query.get(user).id else: raise TypeError(""User cannot be determined from argument: {0}"".format( user)) record = ZenodoRecord.get_record(record_uuid) # Remove the record from versioning and delete the recid recid = PersistentIdentifier.get('recid', record['recid']) pv = PIDVersioning(child=recid) pv.remove_child(recid) pv.update_redirect() recid.delete() # Remove the record from index try: RecordIndexer().delete(record) except NotFoundError: pass # Remove buckets record_bucket = record.files.bucket RecordsBuckets.query.filter_by(record_id=record.id).delete() record_bucket.locked = False record_bucket.remove() removal_reasons = dict(current_app.config['ZENODO_REMOVAL_REASONS']) if reason in removal_reasons: reason = removal_reasons[reason] depid, deposit = deposit_resolver.resolve(record['_deposit']['id']) try: doi = PersistentIdentifier.get('doi', record['doi']) except PIDDoesNotExistError: doi = None # Record OpenAIRE info try: original_id = openaire_original_id(record, openaire_type(record))[1] datasource_id = openaire_datasource_id(record) except PIDDoesNotExistError: original_id = None datasource_id = None if pv.children.count() == 0: conceptrecid = PersistentIdentifier.get('recid', record['conceptrecid']) conceptrecid.delete() new_last_child = None else: new_last_child = (pv.last_child.pid_value, str(pv.last_child.object_uuid)) if 'conceptdoi' in record: conceptdoi_value = record['conceptdoi'] else: conceptdoi_value = None # Completely delete the deposit # Deposit will be removed from index deposit.delete(delete_published=True) # Clear the record and put the deletion information record.clear() record.update({ 'removal_reason': reason, 'removed_by': user_id, }) record.commit() # Mark the relevant GitHub Release as deleted for ghr in record.model.github_releases: ghr.status = ReleaseStatus.DELETED if not is_local_doi(doi.pid_value): db.session.delete(doi) db.session.commit() # After successful DB commit, sync the DOIs with DataCite if is_local_doi(doi.pid_value): datacite_inactivate.delay(doi.pid_value) if conceptdoi_value: if new_last_child: # Update last child (updates also conceptdoi) pid_value, rec_uuid = new_last_child datacite_register.delay(pid_value, rec_uuid) else: datacite_inactivate.delay(conceptdoi_value) # Also delete from OpenAIRE index if current_app.config['OPENAIRE_DIRECT_INDEXING_ENABLED'] and original_id \ and datasource_id: openaire_delete.delay(original_id=original_id, datasource_id=datasource_id) # Delete access requests as well as access links secret_links = SecretLink.query.join(AccessRequest).filter( AccessRequest.recid == int(recid.pid_value) ) link_ids = [l.id for l in secret_links] with db.session.begin_nested(): AccessRequest.query.filter( \ AccessRequest.recid == int(recid.pid_value) \ ).delete() for s in link_ids: SecretLink.query.filter(SecretLink.id == s).delete() db.session.commit() " 29778,"def populate_aggregated_jsons(path): """"""Aggregate across the entire BIDS dataset .json's into top level .json's Top level .json files would contain only the fields which are common to all subject[/session]/type/*_modality.json's. ATM aggregating only for *_task*_bold.json files. Only the task- and OPTIONAL _acq- field is retained within the aggregated filename. The other BIDS _key-value pairs are ""aggregated over"". 
Parameters ---------- path: str Path to the top of the BIDS dataset """""" # TODO: collect all task- .json files for func files to tasks = {} # way too many -- let's just collect all which are the same! # FIELDS_TO_TRACK = {'RepetitionTime', 'FlipAngle', 'EchoTime', # 'Manufacturer', 'SliceTiming', ''} for fpath in find_files('.*_task-.*\_bold\.json', topdir=path, exclude_vcs=True, exclude=""/\.(datalad|heudiconv)/""): # # According to BIDS spec I think both _task AND _acq (may be more? # _rec, _dir, ...?) should be retained? # TODO: if we are to fix it, then old ones (without _acq) should be # removed first task = re.sub('.*_(task-[^_\.]*(_acq-[^_\.]*)?)_.*', r'\1', fpath) json_ = load_json(fpath) if task not in tasks: tasks[task] = json_ else: rec = tasks[task] # let's retain only those fields which have the same value for field in sorted(rec): if field not in json_ or json_[field] != rec[field]: del rec[field] # create a stub onsets file for each one of those suf = '_bold.json' assert fpath.endswith(suf) # specify the name of the '_events.tsv' file: if '_echo-' in fpath: # multi-echo sequence: bids (1.1.0) specifies just one '_events.tsv' # file, common for all echoes. The name will not include _echo-. # TODO: RF to use re.match for better readability/robustness # So, find out the echo number: fpath_split = fpath.split('_echo-', 1) # split fpath using '_echo-' fpath_split_2 = fpath_split[1].split('_', 1) # split the second part of fpath_split using '_' echoNo = fpath_split_2[0] # get echo number if echoNo == '1': if len(fpath_split_2) != 2: raise ValueError(""Found no trailer after _echo-"") # we modify fpath to exclude '_echo-' + echoNo: fpath = fpath_split[0] + '_' + fpath_split_2[1] else: # for echoNo greater than 1, don't create the events file, so go to # the next for loop iteration: continue events_file = fpath[:-len(suf)] + '_events.tsv' # do not touch any existing thing, it may be precious if not op.lexists(events_file): lgr.debug(""Generating %s"", events_file) with open(events_file, 'w') as f: f.write( ""onset\tduration\ttrial_type\tresponse_time\tstim_file"" ""\tTODO -- fill in rows and add more tab-separated "" ""columns if desired"") # extract tasks files stubs for task_acq, fields in tasks.items(): task_file = op.join(path, task_acq + '_bold.json') # Since we are pulling all unique fields we have to possibly # rewrite this file to guarantee consistency. # See https://github.com/nipy/heudiconv/issues/277 for a usecase/bug # when we didn't touch existing one. # But the fields we enter (TaskName and CogAtlasID) might need need # to be populated from the file if it already exists placeholders = { ""TaskName"": (""TODO: full task name for %s"" % task_acq.split('_')[0].split('-')[1]), ""CogAtlasID"": ""doi:TODO"", } if op.lexists(task_file): j = load_json(task_file) # Retain possibly modified placeholder fields for f in placeholders: if f in j: placeholders[f] = j[f] act = ""Regenerating"" else: act = ""Generating"" lgr.debug(""%s %s"", act, task_file) fields.update(placeholders) save_json(task_file, fields, sort_keys=True, pretty=True) ","def populate_aggregated_jsons(path): """"""Aggregate across the entire BIDS dataset .json's into top level .json's Top level .json files would contain only the fields which are common to all subject[/session]/type/*_modality.json's. ATM aggregating only for *_task*_bold.json files. Only the task- and OPTIONAL _acq- field is retained within the aggregated filename. The other BIDS _key-value pairs are ""aggregated over"". 
Parameters ---------- path: str Path to the top of the BIDS dataset """""" # TODO: collect all task- .json files for func files to tasks = {} # way too many -- let's just collect all which are the same! # FIELDS_TO_TRACK = {'RepetitionTime', 'FlipAngle', 'EchoTime', # 'Manufacturer', 'SliceTiming', ''} for fpath in find_files('.*_task-.*\_bold\.json', topdir=path, exclude_vcs=True, exclude=""/\.(datalad|heudiconv)/""): # # According to BIDS spec I think both _task AND _acq (may be more? # _rec, _dir, ...?) should be retained? # TODO: if we are to fix it, then old ones (without _acq) should be # removed first task = re.sub('.*_(task-[^_\.]*(_acq-[^_\.]*)?)_.*', r'\1', fpath) json_ = load_json(fpath) if task not in tasks: tasks[task] = json_ else: rec = tasks[task] # let's retain only those fields which have the same value for field in sorted(rec): if field not in json_ or json_[field] != rec[field]: del rec[field] # create a stub onsets file for each one of those suf = '_bold.json' assert fpath.endswith(suf) # specify the name of the '_events.tsv' file: if '_echo-' in fpath: # multi-echo sequence: bids (1.1.0) specifies just one '_events.tsv' # file, common for all echoes. The name will not include _echo-. # TODO: RF to use re.match for better readability/robustness # So, find out the echo number: fpath_split = fpath.split('_echo-', 1) # split fpath using '_echo-' fpath_split_2 = fpath_split[1].split('_', 1) # split the second part of fpath_split using '_' echoNo = fpath_split_2[0] # get echo number if echoNo == '1': if len(fpath_split_2) != 2: raise ValueError(""Found no trailer after _echo-"") # we modify fpath to exclude '_echo-' + echoNo: fpath = fpath_split[0] + '_' + fpath_split_2[1] else: # for echoNo greater than 1, don't create the events file, so go to # the next for loop iteration: continue events_file = fpath[:-len(suf)] + '_events.tsv' # do not touch any existing thing, it may be precious if not op.lexists(events_file): lgr.debug(""Generating %s"", events_file) with open(events_file, 'w') as f: f.write( ""onset\tduration\ttrial_type\tresponse_time\tstim_file"" ""\tTODO -- fill in rows and add more tab-separated "" ""columns if desired"") # extract tasks files stubs for task_acq, fields in tasks.items(): task_file = op.join(path, task_acq + '_bold.json') # Since we are pulling all unique fields we have to possibly # rewrite this file to guarantee consistency. # See https://github.com/nipy/heudiconv/issues/277 for a usecase/bug # when we didn't touch existing one. 
# But the fields we enter (TaskName and CogAtlasID) might need need # to be populated from the file if it already exists placeholders = { ""TaskName"": (""TODO: full task name for %s"" % task_acq.split('_')[0].split('-')[1]), ""CogAtlasID"": ""http://www.cognitiveatlas.org/task/id/TODO"", } if op.lexists(task_file): j = load_json(task_file) # Retain possibly modified placeholder fields for f in placeholders: if f in j: placeholders[f] = j[f] act = ""Regenerating"" else: act = ""Generating"" lgr.debug(""%s %s"", act, task_file) fields.update(placeholders) save_json(task_file, fields, sort_keys=True, pretty=True) " 23286,"def help_msg(): print(""Usage: [-h hostname] [-p port] [[probe-name] ...]"") print("" -h hostname name of the host to run the test against"") print("" localhost by default"") print("" -p port port number to use for connection, 4433 by default"") print("" probe-name if present, will run only the probes with given"") print("" names and not all of them, e.g \""sanity\"""") print("" -e probe-name exclude the probe from the list of the ones run"") print("" may be specified multiple times"") print("" -x probe-name expect the probe to fail. When such probe passes despite being marked like this"") print("" it will be reported in the test summary and the whole script will fail."") print("" May be specified multiple times."") print("" -X message expect the `message` substring in exception raised during"") print("" execution of preceding expected failure probe"") print("" usage: [-x probe-name] [-X exception], order is compulsory!"") print("" -C cipher specify cipher for connection. Use integer value"") print("" or IETF name. Integer must be prefixed with '0x'"") print("" if it is hexadecimal. By default uses"") print("" TLS_RSA_WITH_AES_128_CBC_SHA ciphersuite."") print("" -n num only run `num` random tests instead of a full set"") print("" (\""sanity\"" tests are always executed)"") print("" -i interface Allows recording timing information on the"") print("" specified interface. Required to enable timing tests"") print("" -o dir Specifies output directory for timing information"") print("" /tmp by default"") print("" --repeat rep How many timing samples should be gathered for each test"") print("" 100 by default"") print("" --quick Only run a basic subset of tests"") print("" --cpu-list Set the CPU affinity for the tcpdump process"") print("" See taskset(1) man page for the syntax of this"") print("" option. Not used by default."") print("" --payload-len num Size of the sent Application Data record, in bytes"") print("" 512 by default."") print("" --help this message"") ","def help_msg(): print(""Usage: [-h hostname] [-p port] [[probe-name] ...]"") print("" -h hostname name of the host to run the test against"") print("" localhost by default"") print("" -p port port number to use for connection, 4433 by default"") print("" probe-name if present, will run only the probes with given"") print("" names and not all of them, e.g \""sanity\"""") print("" -e probe-name exclude the probe from the list of the ones run"") print("" may be specified multiple times"") print("" -x probe-name expect the probe to fail. 
When such probe passes despite being marked like this"") print("" it will be reported in the test summary and the whole script will fail."") print("" May be specified multiple times."") print("" -X message expect the `message` substring in exception raised during"") print("" execution of preceding expected failure probe"") print("" usage: [-x probe-name] [-X exception], order is compulsory!"") print("" -C cipher specify cipher for connection. Use integer value"") print("" or IETF name. Integer must be prefixed with '0x'"") print("" if it is hexadecimal. By default uses"") print("" TLS_RSA_WITH_AES_128_CBC_SHA ciphersuite."") print("" -n num only run `num` random tests instead of a full set"") print("" (\""sanity\"" tests are always executed)"") print("" -i interface Allows recording timing information on the"") print("" specified interface. Required to enable timing tests"") print("" -o dir Specifies output directory for timing information"") print("" /tmp by default"") print("" --repeat rep How many timing samples should be gathered for each test"") print("" 100 by default"") print("" --quick Only run a basic subset of tests"") print("" --cpu-list Set the CPU affinity for the tcpdump process"") print("" See taskset(1) man page for the syntax of this"") print("" option. Not used by default."") print("" --payload-len num Size of the sent Application Data record, in bytes."") print("" 512 by default."") print("" --help this message"") " 4895,"def segment_hits(cx, cy, x, y, radius): """""" Return the indices of the segments in the polyline with coordinates (*cx*, *cy*) that are within a distance *radius* of the point (*x*, *y*). """""" # Process single points specially if len(x) <= 1: res, = np.nonzero(np.hypot(cx - x, cy - y) <= radius) return res # We need to lop the last element off a lot. xr, yr = x[:-1], y[:-1] # Only look at line segments whose nearest point to C on the line # lies within the segment. dx, dy = x[1:] - xr, y[1:] - yr u = (cx - xr) * dx + (cy - yr) * dy candidates = (u >= 0) & (u <= dx ** 2 + dy ** 2) # Note that there is a little area near one side of each point # which will be near neither segment, and another which will # be near both, depending on the angle of the lines. The # following radius test eliminates these ambiguities. point_hits = np.hypot(cx - x, cy - y) <= radius candidates = candidates & ~(point_hits[:-1] | point_hits[1:]) # For those candidates which remain, determine how far they lie away # from the line. px, py = xr + u * dx, yr + u * dy line_hits = np.hypot(cx - px, cy - py) <= radius line_hits = line_hits & candidates points, = point_hits.nonzero() lines, = line_hits.nonzero() return np.concatenate((points, lines)) ","def segment_hits(cx, cy, x, y, radius): """""" Return the indices of the segments in the polyline with coordinates (*cx*, *cy*) that are within a distance *radius* of the point (*x*, *y*). """""" # Process single points specially if len(x) <= 1: res, = np.nonzero(np.hypot(cx - x, cy - y) <= radius) return res # We need to lop the last element off a lot. xr, yr = x[:-1], y[:-1] # Only look at line segments whose nearest point to C on the line # lies within the segment. dx, dy = x[1:] - xr, y[1:] - yr u = (cx - xr) * dx + (cy - yr) * dy candidates = (0 <= u) & (u <= dx ** 2 + dy ** 2) # Note that there is a little area near one side of each point # which will be near neither segment, and another which will # be near both, depending on the angle of the lines. The # following radius test eliminates these ambiguities. 
point_hits = np.hypot(cx - x, cy - y) <= radius candidates = candidates & ~(point_hits[:-1] | point_hits[1:]) # For those candidates which remain, determine how far they lie away # from the line. px, py = xr + u * dx, yr + u * dy line_hits = np.hypot(cx - px, cy - py) <= radius line_hits = line_hits & candidates points, = point_hits.nonzero() lines, = line_hits.nonzero() return np.concatenate((points, lines)) " 7660,"def _notify_registration(registration, template, to_managers=False): from indico.modules.events.registration.util import get_ticket_attachments attachments = [] regform = registration.registration_form tickets_handled = values_from_signal(signals.event.is_ticketing_handled.send(regform), single_value=True) if (not to_managers and regform.tickets_enabled and regform.ticket_on_email and not any(tickets_handled) and registration.state == RegistrationState.complete): attachments += get_ticket_attachments(registration) if (not to_managers and registration.registration_form.complete_registration_attach_ical): event_ical = event_to_ical(registration.event) attachments.append(('invite.ics', event_ical, 'text/calendar')) template = get_template_module(f'events/registration/emails/{template}', registration=registration) to_list = registration.email if not to_managers else registration.registration_form.manager_notification_recipients from_address = registration.registration_form.sender_address if not to_managers else None mail = make_email(to_list=to_list, template=template, html=True, from_address=from_address, attachments=attachments) user = session.user if session else None send_email(mail, event=registration.registration_form.event, module='Registration', user=user, log_metadata={'registration_id': registration.id}) ","def _notify_registration(registration, template, to_managers=False): from indico.modules.events.registration.util import get_ticket_attachments attachments = [] regform = registration.registration_form tickets_handled = values_from_signal(signals.event.is_ticketing_handled.send(regform), single_value=True) if (not to_managers and regform.tickets_enabled and regform.ticket_on_email and not any(tickets_handled) and registration.state == RegistrationState.complete): attachments += get_ticket_attachments(registration) if not to_managers and registration.registration_form.complete_registration_attach_ical: event_ical = event_to_ical(registration.event) attachments.append(('invite.ics', event_ical, 'text/calendar')) template = get_template_module(f'events/registration/emails/{template}', registration=registration) to_list = registration.email if not to_managers else registration.registration_form.manager_notification_recipients from_address = registration.registration_form.sender_address if not to_managers else None mail = make_email(to_list=to_list, template=template, html=True, from_address=from_address, attachments=attachments) user = session.user if session else None send_email(mail, event=registration.registration_form.event, module='Registration', user=user, log_metadata={'registration_id': registration.id}) " 8898,"def test_list_parse_new_lines_strip(): option = types.ListAttribute('foo', strip=False) # strip isn't used for new-line based list attribute assert option.parse("""""" value 1 ""# value 2"" value 3 """""") == [ 'value 1', '# value 2', 'value 3', ] ","def test_list_parse_new_lines_strip(): option = types.ListAttribute('foo', strip=False) # strip isn't used for newline-based list attribute assert option.parse("""""" value 1 ""# value 2"" value 3 """""") == [ 
'value 1', '# value 2', 'value 3', ] " 1554,"def test_scalar_fit_param(): # test that a scalar fit param is supported with a warning. # TODO: it should raise an error from v0.24. Issue #15805 class TestEstimator(BaseEstimator, ClassifierMixin): def __init__(self, a=None): self.a = a def fit(self, X, y, r): assert r == 42 def predict(self, X): return np.zeros(shape=(len(X))) cv = GridSearchCV(TestEstimator(), param_grid={'a': [1, 2]}) X, y = make_classification() with pytest.warns(FutureWarning, match=""Support for scaler fit params""): cv.fit(X, y, r=42) ","def test_scalar_fit_param(): # test that a scalar fit param is supported with a warning. # TODO: it should raise an error from v0.24. Issue #15805 class TestEstimator(BaseEstimator, ClassifierMixin): def __init__(self, a=None): self.a = a def fit(self, X, y, r): assert r == 42 def predict(self, X): return np.zeros(shape=(len(X))) cv = GridSearchCV(TestEstimator(), param_grid={'a': [1, 2]}) X, y = make_classification() with pytest.warns(FutureWarning, match=""Support for scalar fit params""): cv.fit(X, y, r=42) " 26970,"def warn_of_missing_files(files): print(f""[red]Check failed. Here are the files we expected but did not find:[/red]\n"") for file in files: print(f"" - [red]{file}[/red]"") ","def warn_of_missing_files(files): print(""[red]Check failed. Here are the files we expected but did not find:[/red]\n"") for file in files: print(f"" - [red]{file}[/red]"") " 2685,"def sort_by_row_values(graph, copy=True): """"""Sort a sparse graph such that each row is stored with increasing values. Parameters ---------- graph : sparse matrix, (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is converted to CSR format if not already. copy : bool, optional (default=True) If True, the graph is copied before sorting. If False, the sorting is performed inplace. If graph is not of CSR format, a copy is always returned. Returns ------- graph : sparse matrix, (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is of CSR format. """""" if graph.format not in (""csr"", ""csc"", ""coo"", ""lil""): raise TypeError( ""Sparse matrix in {!r} format is not supported due to "" ""its handling of explicit zeros"".format(graph.format) ) elif graph.format != ""csr"": graph = graph.asformat(""csr"") elif copy: graph = graph.copy() # if each sample has the same number of provided neighbors row_nnz = np.diff(graph.indptr) if row_nnz.max() == row_nnz.min(): n_samples = graph.shape[0] distances = graph.data.reshape(n_samples, -1) order = np.argsort(distances, kind=""mergesort"") order += np.arange(n_samples)[:, None] * row_nnz[0] order = order.ravel() graph.data = graph.data[order] graph.indices = graph.indices[order] else: for start, stop in zip(graph.indptr, graph.indptr[1:]): order = np.argsort(graph.data[start:stop], kind=""mergesort"") graph.data[start:stop] = graph.data[start:stop][order] graph.indices[start:stop] = graph.indices[start:stop][order] return graph ","def sort_by_row_values(graph, copy=True): """"""Sort a sparse graph such that each row is stored with increasing values. Parameters ---------- graph : sparse matrix, (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is converted to CSR format if not already. copy : bool, optional (default=True) If True, the graph is copied before sorting. If False, the sorting is performed inplace. 
If graph is not of CSR format, a copy is always returned. Returns ------- graph : sparse matrix of shape (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is of CSR format. """""" if graph.format not in (""csr"", ""csc"", ""coo"", ""lil""): raise TypeError( ""Sparse matrix in {!r} format is not supported due to "" ""its handling of explicit zeros"".format(graph.format) ) elif graph.format != ""csr"": graph = graph.asformat(""csr"") elif copy: graph = graph.copy() # if each sample has the same number of provided neighbors row_nnz = np.diff(graph.indptr) if row_nnz.max() == row_nnz.min(): n_samples = graph.shape[0] distances = graph.data.reshape(n_samples, -1) order = np.argsort(distances, kind=""mergesort"") order += np.arange(n_samples)[:, None] * row_nnz[0] order = order.ravel() graph.data = graph.data[order] graph.indices = graph.indices[order] else: for start, stop in zip(graph.indptr, graph.indptr[1:]): order = np.argsort(graph.data[start:stop], kind=""mergesort"") graph.data[start:stop] = graph.data[start:stop][order] graph.indices[start:stop] = graph.indices[start:stop][order] return graph " 30537,"def create_content_descriptor(version, asset_id, res, github_token, beta_rn=None): # time format example 2017 - 06 - 11T15:25:57.0 + 00:00 date = datetime.datetime.now().strftime(""%Y-%m-%dT%H:%M:%S.0+00:00"") release_notes = '## Demisto Content Release Notes for version {} ({})\n'.format(version, asset_id) release_notes += '##### Published on {}\n{}'.format(datetime.datetime.now().strftime(""%d %B %Y""), res) content_descriptor = { ""installDate"": ""0001-01-01T00:00:00Z"", ""assetId"": int(asset_id), ""releaseNotes"": release_notes, ""modified"": date, ""ignoreGit"": False, ""releaseDate"": date, ""version"": -1, ""release"": version, ""id"": """" } draft = get_release_notes_draft(github_token, asset_id) if draft: content_descriptor['releaseNotes'] = draft with open('content-descriptor.json', 'w') as outfile: json.dump(content_descriptor, outfile) with open('release-notes.md', 'w') as outfile: outfile.write(release_notes) print(""saving beta release notes"") with open('beta-release-notes.md', 'w') as outfile: beta_release_notes = '## Demisto Content Beta Release Notes for version {} ({})\n'.format(""5.5.0"", asset_id) beta_release_notes += '##### Published on {}\n{}'.format(datetime.datetime.now().strftime(""%d %B %Y""), beta_rn) outfile.write(beta_rn) ","def create_content_descriptor(version, asset_id, res, github_token, beta_rn=None): # time format example 2017 - 06 - 11T15:25:57.0 + 00:00 date = datetime.datetime.now().strftime(""%Y-%m-%dT%H:%M:%S.0+00:00"") release_notes = '## Demisto Content Release Notes for version {} ({})\n'.format(version, asset_id) release_notes += '##### Published on {}\n{}'.format(datetime.datetime.now().strftime(""%d %B %Y""), res) content_descriptor = { ""installDate"": ""0001-01-01T00:00:00Z"", ""assetId"": int(asset_id), ""releaseNotes"": release_notes, ""modified"": date, ""ignoreGit"": False, ""releaseDate"": date, ""version"": -1, ""release"": version, ""id"": """" } draft = get_release_notes_draft(github_token, asset_id) if draft: content_descriptor['releaseNotes'] = draft with open('content-descriptor.json', 'w') as outfile: json.dump(content_descriptor, outfile) with open('release-notes.md', 'w') as outfile: outfile.write(release_notes) print(""saving beta release notes"") with open('beta-release-notes.md', 'w') as outfile: beta_release_notes = '## Demisto Content Beta Release 
Notes for version {} ({})\n'.format(version, asset_id) beta_release_notes += '##### Published on {}\n{}'.format(datetime.datetime.now().strftime(""%d %B %Y""), beta_rn) outfile.write(beta_rn) " 26954,"def context_to_airflow_vars(context, in_env_var_format=False): """""" Given a context, this function provides a dictionary of values that can be used to externally reconstruct relations between dags, dag_runs, tasks and task_instances. Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if in_env_var_format is set to True. :param context: The context for the task_instance of interest. :type context: dict :param in_env_var_format: If returned vars should be in ABC_DEF_GHI format. :type in_env_var_format: bool :return: task_instance context as dict. """""" params = {} if in_env_var_format: name_format = 'env_var_format' else: name_format = 'default' task = context.get('task') task_instance = context.get('task_instance') dag_run = context.get('dag_run') ops = [ (task, 'email', 'AIRFLOW_CONTEXT_DAG_EMAIL'), (task, 'owner', 'AIRFLOW_CONTEXT_DAG_OWNER'), (task_instance, 'dag_id', 'AIRFLOW_CONTEXT_DAG_ID'), (task_instance, 'task_id', 'AIRFLOW_CONTEXT_TASK_ID'), (task_instance, 'execution_date', 'AIRFLOW_CONTEXT_EXECUTION_DATE'), (dag_run, 'run_id', 'AIRFLOW_CONTEXT_DAG_RUN_ID'), ] for subject, attr, mapping_key in ops: _attr = getattr(subject, attr, None) if subject and _attr: mapping_value = AIRFLOW_VAR_NAME_FORMAT_MAPPING[mapping_key][name_format] if isinstance(_attr, str): params[mapping_value] = _attr if isinstance(_attr, datetime): params[mapping_value] = _attr.isoformat() elif isinstance(_attr, list): # os env variable value needs to be string params[mapping_value] = ','.join(_attr) return params ","def context_to_airflow_vars(context, in_env_var_format=False): """""" Given a context, this function provides a dictionary of values that can be used to externally reconstruct relations between dags, dag_runs, tasks and task_instances. Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if in_env_var_format is set to True. :param context: The context for the task_instance of interest. :type context: dict :param in_env_var_format: If returned vars should be in ABC_DEF_GHI format. :type in_env_var_format: bool :return: task_instance context as dict. """""" params = {} if in_env_var_format: name_format = 'env_var_format' else: name_format = 'default' task = context.get('task') task_instance = context.get('task_instance') dag_run = context.get('dag_run') ops = [ (task, 'email', 'AIRFLOW_CONTEXT_DAG_EMAIL'), (task, 'owner', 'AIRFLOW_CONTEXT_DAG_OWNER'), (task_instance, 'dag_id', 'AIRFLOW_CONTEXT_DAG_ID'), (task_instance, 'task_id', 'AIRFLOW_CONTEXT_TASK_ID'), (task_instance, 'execution_date', 'AIRFLOW_CONTEXT_EXECUTION_DATE'), (dag_run, 'run_id', 'AIRFLOW_CONTEXT_DAG_RUN_ID'), ] for subject, attr, mapping_key in ops: _attr = getattr(subject, attr, None) if subject and _attr: mapping_value = AIRFLOW_VAR_NAME_FORMAT_MAPPING[mapping_key][name_format] if isinstance(_attr, str): params[mapping_value] = _attr elif isinstance(_attr, datetime): params[mapping_value] = _attr.isoformat() elif isinstance(_attr, list): # os env variable value needs to be string params[mapping_value] = ','.join(_attr) return params " 52657,"def main() -> None: """""" Entrypoint for the forking launcher. """""" # First argument is the path to the database config db_config_path = sys.argv[1] # Split up the subsequent arguments into each workers' arguments; # `--` is our delimiter of choice. 
args = sys.argv[2:] args_by_worker: List[List[str]] = [ list(args) for cond, args in itertools.groupby(args, lambda ele: ele != ""--"") if cond and args ] # Prevent Twisted from installing a shared reactor that all the workers will # inherit when we fork(), by installing our own beforehand. proxy_reactor = ProxiedReactor() installReactor(proxy_reactor) # Import the entrypoints for all the workers. worker_functions = [] for worker_args in args_by_worker: worker_module = importlib.import_module(worker_args[0]) worker_functions.append(worker_module.main) # At this point, we've imported all the main entrypoints for all the workers. # Now we basically just fork() out to create the workers we need. # Because we're using fork(), all the workers get a clone of this launcher's # memory space and don't need to repeat the work of loading the code! # Instead of using fork() directly, we use the multiprocessing library,# # which uses fork() on Unix platforms. # We need to prepare the database first as otherwise all the workers will # try to create a schema version table and some will crash out. from synapse._scripts import update_synapse_database update_proc = multiprocessing.Process( target=_worker_entrypoint, args=( update_synapse_database.main, proxy_reactor, [ ""update_synapse_database"", ""--database-config"", db_config_path, ""--run-background-updates"", ], ), ) print(""===== PREPARING DATABASE ====="", file=sys.stderr) update_proc.start() update_proc.join() print(""===== PREPARED DATABASE ====="", file=sys.stderr) processes = [] for (func, worker_args) in zip(worker_functions, args_by_worker): process = multiprocessing.Process( target=_worker_entrypoint, args=(func, proxy_reactor, worker_args) ) process.start() processes.append(process) # Be a good parent and wait for our children to die before exiting. for process in processes: process.join() ","def main() -> None: """""" Entrypoint for the forking launcher. """""" parser = argparse.ArgumentParser() parser.add_argument(""db_config"", help=""Path to database config file"") parser.add_argument(""args"", nargs=""..."") ns = parser.parse_args() db_config_path = ns.db_config args = ns.args args_by_worker: List[List[str]] = [ list(args) for cond, args in itertools.groupby(args, lambda ele: ele != ""--"") if cond and args ] # Prevent Twisted from installing a shared reactor that all the workers will # inherit when we fork(), by installing our own beforehand. proxy_reactor = ProxiedReactor() installReactor(proxy_reactor) # Import the entrypoints for all the workers. worker_functions = [] for worker_args in args_by_worker: worker_module = importlib.import_module(worker_args[0]) worker_functions.append(worker_module.main) # At this point, we've imported all the main entrypoints for all the workers. # Now we basically just fork() out to create the workers we need. # Because we're using fork(), all the workers get a clone of this launcher's # memory space and don't need to repeat the work of loading the code! # Instead of using fork() directly, we use the multiprocessing library,# # which uses fork() on Unix platforms. # We need to prepare the database first as otherwise all the workers will # try to create a schema version table and some will crash out. 
from synapse._scripts import update_synapse_database update_proc = multiprocessing.Process( target=_worker_entrypoint, args=( update_synapse_database.main, proxy_reactor, [ ""update_synapse_database"", ""--database-config"", db_config_path, ""--run-background-updates"", ], ), ) print(""===== PREPARING DATABASE ====="", file=sys.stderr) update_proc.start() update_proc.join() print(""===== PREPARED DATABASE ====="", file=sys.stderr) processes = [] for (func, worker_args) in zip(worker_functions, args_by_worker): process = multiprocessing.Process( target=_worker_entrypoint, args=(func, proxy_reactor, worker_args) ) process.start() processes.append(process) # Be a good parent and wait for our children to die before exiting. for process in processes: process.join() " 29259,"def send_email_to_recipients( sender_email: str, recipient_emails: List[str], subject: str, plaintext_body: str, html_body: str, bcc: Optional[List[str]] = None, reply_to: Optional[str] = None, recipient_variables: Optional[ Dict[str, Dict[str, Union[str, float]]]] = None ) -> bool: """"""Send POST HTTP request to mailgun api. This method is adopted from the requests library's post method. Args: sender_email: str. The email address of the sender. This should be in the form 'SENDER_NAME ' or 'SENDER_EMAIL_ADDRESS'. Must be utf-8. recipient_emails: list(str). The email addresses of the recipients. Must be utf-8. subject: str. The subject line of the email, Must be utf-8. plaintext_body: str. The plaintext body of the email. Must be utf-8. html_body: str. The HTML body of the email. Must fit in a datastore entity. Must be utf-8. bcc: list(str)|None. Optional argument. List of bcc emails. reply_to: str|None. Optional argument. Reply address formatted like “reply+@ reply_id is the unique id of the sender. recipient_variables: dict|None. Optional argument. If batch sending requires differentiating each email based on the recipient, we assign a unique id to each recipient, including info relevant to that recipient so that we can reference it when composing the email like so: recipient_variables = {""bob@example.com"": {""first"":""Bob"", ""id"":1}, ""alice@example.com"": {""first"":""Alice"", ""id"":2}} subject = 'Hey, %recipient.first%’ More info about this format at: https://documentation.mailgun.com/en/ latest/user_manual.html#batch-sending. Raises: Exception. The mailgun api key is not stored in feconf.MAILGUN_API_KEY. Exception. The mailgun domain name is not stored in feconf.MAILGUN_DOMAIN_NAME. Returns: bool. Whether the emails are sent successfully. """""" if not feconf.MAILGUN_API_KEY: raise Exception('Mailgun API key is not available.') if not feconf.MAILGUN_DOMAIN_NAME: raise Exception('Mailgun domain name is not set.') # To send bulk emails we pass list of recipients in 'to' paarameter of # post data. Maximum limit of recipients per request is 1000. 
# For more detail check following link: # https://documentation.mailgun.com/user_manual.html#batch-sending recipient_email_lists = [ recipient_emails[i:i + 1000] for i in range(0, len(recipient_emails), 1000)] for email_list in recipient_email_lists: data = { 'from': sender_email, 'subject': subject.encode('utf-8'), 'text': plaintext_body.encode('utf-8'), 'html': html_body.encode('utf-8'), 'to': email_list[0] if len(email_list) == 1 else email_list } if bcc: data['bcc'] = bcc[0] if len(bcc) == 1 else bcc if reply_to: data['h:Reply-To'] = reply_to # 'recipient-variable' in post data forces mailgun to send individual # email to each recipient (This is intended to be a workaround for # sending individual emails). data['recipient_variables'] = recipient_variables or {} # The b64encode accepts and returns bytes, so we first need to encode # the MAILGUN_API_KEY to bytes, then decode the returned bytes back # to string. base64_mailgun_api_key = base64.b64encode( b'api:%b' % feconf.MAILGUN_API_KEY.encode('utf-8') ).strip().decode('utf-8') auth_str = 'Basic %s' % base64_mailgun_api_key header = {'Authorization': auth_str} server = ( ('https://api.mailgun.net/v3/%s/messages') % feconf.MAILGUN_DOMAIN_NAME) # The 'ascii' is used here, because only ASCII char are allowed in url, # also the docs recommend this approach: # https://docs.python.org/3.7/library/urllib.request.html#urllib-examples encoded_url = urllib.parse.urlencode(data).encode('ascii') req = urlrequest.Request(server, encoded_url, header) resp = utils.url_open(req) # The function url_open returns a file_like object that can be queried # for the status code of the url query. If it is not 200, the mail query # failed so we return False (this function did not complete # successfully). if resp.getcode() != 200: return False return True ","def send_email_to_recipients( sender_email: str, recipient_emails: List[str], subject: str, plaintext_body: str, html_body: str, bcc: Optional[List[str]] = None, reply_to: Optional[str] = None, recipient_variables: Optional[ Dict[str, Dict[str, Union[str, float]]]] = None ) -> bool: """"""Send POST HTTP request to mailgun api. This method is adopted from the requests library's post method. Args: sender_email: str. The email address of the sender. This should be in the form 'SENDER_NAME ' or 'SENDER_EMAIL_ADDRESS'. Must be utf-8. recipient_emails: list(str). The email addresses of the recipients. Must be utf-8. subject: str. The subject line of the email, Must be utf-8. plaintext_body: str. The plaintext body of the email. Must be utf-8. html_body: str. The HTML body of the email. Must fit in a datastore entity. Must be utf-8. bcc: list(str)|None. Optional argument. List of bcc emails. reply_to: str|None. Optional argument. Reply address formatted like “reply+@ reply_id is the unique id of the sender. recipient_variables: dict|None. Optional argument. If batch sending requires differentiating each email based on the recipient, we assign a unique id to each recipient, including info relevant to that recipient so that we can reference it when composing the email like so: recipient_variables = {""bob@example.com"": {""first"":""Bob"", ""id"":1}, ""alice@example.com"": {""first"":""Alice"", ""id"":2}} subject = 'Hey, %recipient.first%’ More info about this format at: https://documentation.mailgun.com/en/ latest/user_manual.html#batch-sending. Raises: Exception. The mailgun api key is not stored in feconf.MAILGUN_API_KEY. Exception. The mailgun domain name is not stored in feconf.MAILGUN_DOMAIN_NAME. 
Returns: bool. Whether the emails are sent successfully. """""" if not feconf.MAILGUN_API_KEY: raise Exception('Mailgun API key is not available.') if not feconf.MAILGUN_DOMAIN_NAME: raise Exception('Mailgun domain name is not set.') # To send bulk emails we pass list of recipients in 'to' paarameter of # post data. Maximum limit of recipients per request is 1000. # For more detail check following link: # https://documentation.mailgun.com/user_manual.html#batch-sending recipient_email_lists = [ recipient_emails[i:i + 1000] for i in range(0, len(recipient_emails), 1000)] for email_list in recipient_email_lists: data = { 'from': sender_email, 'subject': subject.encode('utf-8'), 'text': plaintext_body.encode('utf-8'), 'html': html_body.encode('utf-8'), 'to': email_list[0] if len(email_list) == 1 else email_list } if bcc: data['bcc'] = bcc[0] if len(bcc) == 1 else bcc if reply_to: data['h:Reply-To'] = reply_to # 'recipient-variable' in post data forces mailgun to send individual # email to each recipient (This is intended to be a workaround for # sending individual emails). data['recipient_variables'] = recipient_variables or {} # The b64encode accepts and returns bytes, so we first need to encode # the MAILGUN_API_KEY to bytes, then decode the returned bytes back # to string. base64_mailgun_api_key = base64.b64encode( b'api:%b' % feconf.MAILGUN_API_KEY.encode('utf-8') ).strip().decode('utf-8') auth_str = 'Basic %s' % base64_mailgun_api_key header = {'Authorization': auth_str} server = ( ('https://api.mailgun.net/v3/%s/messages') % feconf.MAILGUN_DOMAIN_NAME) # The 'ascii' is used here, because only ASCII char are allowed in url, # also the docs recommend this approach: # https://docs.python.org/3.7/library/urllib.request.html#urllib-examples encoded_url = urllib.parse.urlencode(data).encode('ascii') req = urllib.request.Request(server, encoded_url, header) resp = utils.url_open(req) # The function url_open returns a file_like object that can be queried # for the status code of the url query. If it is not 200, the mail query # failed so we return False (this function did not complete # successfully). if resp.getcode() != 200: return False return True " 7112,"def cycle_spin(x, func, max_shifts, shift_steps=1, num_workers=None, multichannel=False, func_kw={}): """"""Cycle spinning (repeatedly apply func to shifted versions of x). Parameters ---------- x : array-like Data for input to ``func``. func : function A function to apply to circular shifted versions of ``x``. Should take ``x`` as its first argument. Any additional arguments can be supplied via ``func_kw``. max_shifts : int or tuple If an integer, shifts in ``range(0, max_shifts+1)`` will be used along each axis of ``x``. If a tuple, ``range(0, max_shifts[i]+1)`` will be along axis i. shift_steps : int or tuple, optional The step size for the shifts applied along axis, i, are:: ``range((0, max_shifts[i]+1, shift_steps[i]))``. If an integer is provided, the same step size is used for all axes. num_workers : int or None, optional The number of parallel threads to use during cycle spinning. If set to ``None``, the full set of available cores are used. multichannel : bool, optional Whether to treat the final axis as channels (no cycle shifts are performed over the channels axis). func_kw : dict, optional Additional keyword arguments to supply to ``func``. Returns ------- avg_y : np.ndarray The output of ``func(x, **func_kw)`` averaged over all combinations of the specified axis shifts. 
Notes ----- Cycle spinning was proposed as a way to approach shift-invariance via performing several circular shifts of a shift-variant transform [1]_. For a n-level discrete wavelet transforms, one may wish to perform all shifts up to ``max_shifts = 2**n - 1``. In practice, much of the benefit can often be realized with only a small number of shifts per axis. For transforms such as the blockwise discrete cosine transform, one may wish to evaluate shifts up to the block size used by the transform. References ---------- .. [1] R.R. Coifman and D.L. Donoho. ""Translation-Invariant De-Noising"". Wavelets and Statistics, Lecture Notes in Statistics, vol.103. Springer, New York, 1995, pp.125-150. :DOI:`10.1007/978-1-4612-2544-7_9` Examples -------- >>> import skimage.data >>> from skimage import img_as_float >>> from skimage.restoration import denoise_wavelet, cycle_spin >>> img = img_as_float(skimage.data.camera()) >>> sigma = 0.1 >>> img = img + sigma * np.random.standard_normal(img.shape) >>> denoised = cycle_spin(img, func=denoise_wavelet, max_shifts=3) """""" x = np.asanyarray(x) all_shifts = _generate_shifts(x.ndim, multichannel, max_shifts, shift_steps) all_shifts = list(all_shifts) def _run_one_shift(shift): # shift, apply function, inverse shift xs = _roll_axes(x, shift) tmp = func(xs, **func_kw) return _roll_axes(tmp, -np.asarray(shift)) # compute a running average across the cycle shifts if num_workers == 1: # serial processing mean = _run_one_shift(all_shifts[0]) for shift in all_shifts[1:]: mean += _run_one_shift(shift) mean /= len(all_shifts) else: # multithreaded via dask futures = [dask.delayed(_run_one_shift)(s) for s in all_shifts] mean = sum(futures) / len(futures) mean = mean.compute(num_workers=num_workers) return mean ","def cycle_spin(x, func, max_shifts, shift_steps=1, num_workers=None, multichannel=False, func_kw={}): """"""Cycle spinning (repeatedly apply func to shifted versions of x). Parameters ---------- x : array-like Data for input to ``func``. func : function A function to apply to circularly shifted versions of ``x``. Should take ``x`` as its first argument. Any additional arguments can be supplied via ``func_kw``. max_shifts : int or tuple If an integer, shifts in ``range(0, max_shifts+1)`` will be used along each axis of ``x``. If a tuple, ``range(0, max_shifts[i]+1)`` will be along axis i. shift_steps : int or tuple, optional The step size for the shifts applied along axis, i, are:: ``range((0, max_shifts[i]+1, shift_steps[i]))``. If an integer is provided, the same step size is used for all axes. num_workers : int or None, optional The number of parallel threads to use during cycle spinning. If set to ``None``, the full set of available cores are used. multichannel : bool, optional Whether to treat the final axis as channels (no cycle shifts are performed over the channels axis). func_kw : dict, optional Additional keyword arguments to supply to ``func``. Returns ------- avg_y : np.ndarray The output of ``func(x, **func_kw)`` averaged over all combinations of the specified axis shifts. Notes ----- Cycle spinning was proposed as a way to approach shift-invariance via performing several circular shifts of a shift-variant transform [1]_. For a n-level discrete wavelet transforms, one may wish to perform all shifts up to ``max_shifts = 2**n - 1``. In practice, much of the benefit can often be realized with only a small number of shifts per axis. 
For transforms such as the blockwise discrete cosine transform, one may wish to evaluate shifts up to the block size used by the transform. References ---------- .. [1] R.R. Coifman and D.L. Donoho. ""Translation-Invariant De-Noising"". Wavelets and Statistics, Lecture Notes in Statistics, vol.103. Springer, New York, 1995, pp.125-150. :DOI:`10.1007/978-1-4612-2544-7_9` Examples -------- >>> import skimage.data >>> from skimage import img_as_float >>> from skimage.restoration import denoise_wavelet, cycle_spin >>> img = img_as_float(skimage.data.camera()) >>> sigma = 0.1 >>> img = img + sigma * np.random.standard_normal(img.shape) >>> denoised = cycle_spin(img, func=denoise_wavelet, max_shifts=3) """""" x = np.asanyarray(x) all_shifts = _generate_shifts(x.ndim, multichannel, max_shifts, shift_steps) all_shifts = list(all_shifts) def _run_one_shift(shift): # shift, apply function, inverse shift xs = _roll_axes(x, shift) tmp = func(xs, **func_kw) return _roll_axes(tmp, -np.asarray(shift)) # compute a running average across the cycle shifts if num_workers == 1: # serial processing mean = _run_one_shift(all_shifts[0]) for shift in all_shifts[1:]: mean += _run_one_shift(shift) mean /= len(all_shifts) else: # multithreaded via dask futures = [dask.delayed(_run_one_shift)(s) for s in all_shifts] mean = sum(futures) / len(futures) mean = mean.compute(num_workers=num_workers) return mean " 8253,"def get_components1d_list(): components1d_list = [c_name for c_name in dir(components1d) if '_' not in c_name] # Remove EELSCLEdge, since it is tested elsewhere more appropriate components1d_list.remove('EELSCLEdge') return components1d_list ","def get_components1d_list(): components1d_list = [c_name for c_name in dir(components1d) if not cname.startswith('_')] # Remove EELSCLEdge, since it is tested elsewhere more appropriate components1d_list.remove('EELSCLEdge') return components1d_list " 34429,"def is_conversation_test_file(file_path: Text) -> bool: """"""Checks if a file is a Rasa conversation test file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a conversation test file, otherwise `False`. """""" if not file_path.endswith("".md""): return False try: dirname = os.path.dirname(file_path) return is_story_file(file_path) and DEFAULT_E2E_TESTS_PATH in dirname except Exception as e: # catch-all because we might be loading files we are not expecting to load logger.error( f""Tried to check if '{file_path}' is a conversation test file, but failed "" f""to read it. If this file contains conversation test data, you should "" f""investigate this error, otherwise it is probably best to "" f""move the file to a different location. "" f""Error: {e}"" ) return False ","def is_end_to_end_conversation_test_file(file_path: Text) -> bool: """"""Checks if a file is a Rasa conversation test file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a conversation test file, otherwise `False`. """""" if not file_path.endswith("".md""): return False try: dirname = os.path.dirname(file_path) return is_story_file(file_path) and DEFAULT_E2E_TESTS_PATH in dirname except Exception as e: # catch-all because we might be loading files we are not expecting to load logger.error( f""Tried to check if '{file_path}' is a conversation test file, but failed "" f""to read it. If this file contains conversation test data, you should "" f""investigate this error, otherwise it is probably best to "" f""move the file to a different location. 
"" f""Error: {e}"" ) return False " 43647,"def x_mixer(wires): r""""""""Creates the basic Pauli-X mixer Hamiltonian used in the original `QAOA paper `__, defined as: .. math:: H_M \ = \ \displaystyle\sum_{i} X_{i}, where :math:`i` ranges over all qubits, and :math:`X_i` denotes the Pauli-X on the :math:`i`-th qubit. Args: qubits (Iterable or Wires): The collection of wires to which the observables in the Hamiltonian correspond. """""" ############## # Input checks wires = Wires(wires) ############## coeffs = [1 for i in wires] obs = [qml.PauliX(i) for i in wires] return qml.Hamiltonian(coeffs, obs) ","def x_mixer(wires): r""""""""Creates the basic Pauli-X mixer Hamiltonian used in the original `QAOA paper `__, defined as: .. math:: H_M \ = \ \displaystyle\sum_{i} X_{i}, where :math:`i` ranges over all qubits, and :math:`X_i` denotes the Pauli-X on the :math:`i`-th qubit. Args: wires (Iterable or Wires): The collection of wires to which the observables in the Hamiltonian correspond. """""" ############## # Input checks wires = Wires(wires) ############## coeffs = [1 for i in wires] obs = [qml.PauliX(i) for i in wires] return qml.Hamiltonian(coeffs, obs) " 43771,"def gradient(H, x, delta=0.005291772): r""""""Compute the gradient :math:`\nabla_x \hat{H}(x)` of the electronic Hamiltonian :math:`\hat{H}(x)` for a given set of nuclear coordinates :math:`x` using central differences. Args: H (callable): function with signature ``H(x)`` that builds the electronic Hamiltonian for a given set of coordinates ``x`` x (array[float]): 1D array with the coordinates in Angstroms. The size of the array should be ``3*N`` where ``N`` is the number of atoms in the molecule. delta (float): Step size in Angstroms used to displace the nuclear coordinates. Its default value corresponds to 0.01 Bohr radius. Returns: Iterable[pennylane.Hamiltonian]: list with the gradient vector :math:`\nabla_x \hat{H}(x)` **Example** >>> def H(x): ... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0] >>> x = np.array([0., 0., 0.35, 0., 0., -0.35]) >>> grad = gradient(H, x) >>> print(len(grad), grad[5]) 6 (0.7763135743293005) [I0] + (0.08534360840293387) [Z0] + (0.08534360840293387) [Z1] + (-0.2669341092545041) [Z2] + (-0.26693410925450134) [Z3] + (0.025233628744274508) [Z0 Z1] + (-0.0072162443961340415) [Y0 X1 X2 Y3] + (0.0072162443961340415) [Y0 Y1 X2 X3] + (0.0072162443961340415) [X0 X1 Y2 Y3] + (-0.0072162443961340415) [X0 Y1 Y2 X3] + (0.030654287745411964) [Z0 Z2] + (0.023438043349280003) [Z0 Z3] + (0.023438043349280003) [Z1 Z2] + (0.030654287745411964) [Z1 Z3] + (0.02494407786332001) [Z2 Z3] """""" grad = [derivative(H, x, i, delta=delta) for i in range(x.size)] return grad ","def gradient(H, x, delta=0.005291772): r""""""Compute the gradient :math:`\nabla_x \hat{H}(x)` of the electronic Hamiltonian :math:`\hat{H}(x)` for a given set of nuclear coordinates :math:`x` using central differences. Args: H (callable): function with signature ``H(x)`` that builds the electronic Hamiltonian for a given set of coordinates ``x`` x (array[float]): 1D array with the coordinates in Angstroms. The size of the array should be ``3*N`` where ``N`` is the number of atoms in the molecule. delta (float): Step size in Angstroms used to displace the nuclear coordinates. Its default value corresponds to 0.01 Bohr radius. Returns: Iterable[pennylane.Hamiltonian]: list with the gradient vector :math:`\nabla_x \hat{H}(x)`. Each entry of the gradient is an operator. **Example** >>> def H(x): ... 
return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0] >>> x = np.array([0., 0., 0.35, 0., 0., -0.35]) >>> grad = gradient(H, x) >>> print(len(grad), grad[5]) 6 (0.7763135743293005) [I0] + (0.08534360840293387) [Z0] + (0.08534360840293387) [Z1] + (-0.2669341092545041) [Z2] + (-0.26693410925450134) [Z3] + (0.025233628744274508) [Z0 Z1] + (-0.0072162443961340415) [Y0 X1 X2 Y3] + (0.0072162443961340415) [Y0 Y1 X2 X3] + (0.0072162443961340415) [X0 X1 Y2 Y3] + (-0.0072162443961340415) [X0 Y1 Y2 X3] + (0.030654287745411964) [Z0 Z2] + (0.023438043349280003) [Z0 Z3] + (0.023438043349280003) [Z1 Z2] + (0.030654287745411964) [Z1 Z3] + (0.02494407786332001) [Z2 Z3] """""" grad = [derivative(H, x, i, delta=delta) for i in range(x.size)] return grad " 46286,"def test_plugin_widgets_menus(test_plugin_widgets, make_napari_viewer): """"""Test the plugin widgets get added to the window menu correctly."""""" viewer = make_napari_viewer() # only take the plugin actions actions = viewer.window.plugins_menu.actions() for cnt, action in enumerate(actions): if action.text() == """": # this is the separator break actions = actions[cnt + 1 :] texts = [a.text() for a in actions] for t in ['TestP1', 'TestP2: Widg3', 'TestP3: magic']: assert t in texts # the first item of the plugins is a submenu (for ""Test plugin1"") tp1 = next(m for m in actions if m.text() == 'TestP1') assert tp1.menu() assert [a.text() for a in tp1.menu().actions()] == ['Widg1', 'Widg2'] ","def test_plugin_widgets_menus(test_plugin_widgets, make_napari_viewer): """"""Test the plugin widgets get added to the window menu correctly."""""" viewer = make_napari_viewer() # only take the plugin actions actions = viewer.window.plugins_menu.actions() for cnt, action in enumerate(actions): if action.text() == """": # this is the separator break actions = actions[cnt + 1 :] texts = [a.text() for a in actions] for t in ['TestP1', 'TestP2: Widg3', 'TestP3: magic']: assert t in texts # Expect a submenu (""Test plugin1"") with particular entries. tp1 = next(m for m in actions if m.text() == 'TestP1') assert tp1.menu() assert [a.text() for a in tp1.menu().actions()] == ['Widg1', 'Widg2'] " 6570,"def setup(): add_permissions() ","def setup(company=None, patch=True): add_permissions() " 30536,"def find_indicators_with_limit(indicator_query: str, limit: int, offset: int) -> list: """""" Finds indicators using demisto.searchIndicators """""" # calculate the starting page (each page holds 200 entries) if offset: next_page = int(offset / 200) # set the offset from the starting page parsed_offset = offset - (200 * next_page) else: next_page = 0 parsed_offset = 0 iocs, _ = find_indicators_with_limit_loop(indicator_query, limit, next_page=next_page) return iocs[parsed_offset:limit + parsed_offset] ","def find_indicators_with_limit(indicator_query: str, limit: int, offset: int) -> list: """""" Finds indicators using demisto.searchIndicators """""" # calculate the starting page (each page holds 200 entries) if offset: next_page = int(offset / 200) # set the offset from the starting page parsed_offset = offset - (PAGE_SIZE * next_page) else: next_page = 0 parsed_offset = 0 iocs, _ = find_indicators_with_limit_loop(indicator_query, limit, next_page=next_page) return iocs[parsed_offset:limit + parsed_offset] " 14631,"def _munge_featureset_name(featureset): """""" Joins features in ``featureset`` by '+' if ``featureset`` is not a string. Otherwise, returns ``featureset``. Parameters ---------- featureset : SKLL.FeatureSet A SKLL ``FeatureSet`` object. 
Returns ------- res : str ``featureset`` names joined with '+', if ``featureset`` is not a string. """""" if isinstance(featureset, str): return featureset res = '+'.join(sorted(featureset)) return res ","def _munge_featureset_name(featureset): """""" Joins features in ``featureset`` with a '+' if ``featureset`` is not a string. Otherwise, returns ``featureset``. Parameters ---------- featureset : SKLL.FeatureSet A SKLL ``FeatureSet`` object. Returns ------- res : str ``featureset`` names joined with '+', if ``featureset`` is not a string. """""" if isinstance(featureset, str): return featureset res = '+'.join(sorted(featureset)) return res " 2123,"def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, return_estimator=False, split_progress=None, candidate_progress=None, error_score=np.nan): """"""Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. scorer : A single callable or dict mapping scorer name to the callable If it is a single callable, the return value for ``train_scores`` and ``test_scores`` is a single float. For a dict, it should be one mapping the scorer name to the scorer callable object / function. The callable object / fn should have signature ``scorer(estimator, X, y)``. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : bool, default=False Compute and return score on training set. return_parameters : bool, default=False Return parameters that has been used for the estimator. split_progress : list or tuple, default=None A list or tuple of format (, ). candidate_progress : list or tuple, default=None A list or tuple of format (, ). return_n_test_samples : bool, default=False Whether to return the ``n_test_samples``. return_times : bool, default=False Whether to return the fit/score times. return_estimator : bool, default=False Whether to return the fitted estimator. Returns ------- result : dict with the following attributes train_scores : dict of scorer name -> float Score on training set (for all the scorers), returned only if `return_train_score` is `True`. test_scores : dict of scorer name -> float Score on testing set (for all the scorers). n_test_samples : int Number of test samples. fit_time : float Time spent for fitting in seconds. score_time : float Time spent for scoring in seconds. parameters : dict or None The parameters that have been evaluated. estimator : estimator object The fitted estimator. fit_failed : bool The estimator failed to fit. 
"""""" progress_msg = """" if verbose > 2: if split_progress is not None: progress_msg = f"" {split_progress[0]+1}/{split_progress[1]}"" if candidate_progress and verbose > 9: progress_msg += (f""; {candidate_progress[0]+1}/"" f""{candidate_progress[1]}"") if verbose > 1: if parameters is None: params_msg = '' else: sorted_keys = sorted(parameters) # Ensure deterministic o/p params_msg = (', '.join(f'{k}={parameters[k]}' for k in sorted_keys)) if verbose > 9: start_msg = f""[CV{progress_msg}] START {params_msg}"" print(f""{start_msg}{(80 - len(start_msg)) * '.'}"") # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) if parameters is not None: # clone after setting parameters in case any parameters # are estimators (like pipeline steps) # because pipeline doesn't clone steps in fit cloned_parameters = {} for k, v in parameters.items(): cloned_parameters[k] = clone(v, safe=False) estimator = estimator.set_params(**cloned_parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) result = {} try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): if isinstance(scorer, dict): test_scores = {name: error_score for name in scorer} if return_train_score: train_scores = test_scores.copy() else: test_scores = error_score if return_train_score: train_scores = error_score warnings.warn(""Estimator fit failed. The score on this train-test"" "" partition for these parameters will be set to %f. "" ""Details: \n%s"" % (error_score, format_exc()), FitFailedWarning) else: raise ValueError(""error_score must be the string 'raise' or a"" "" numeric value. 
(Hint: if using 'raise', please"" "" make sure that it has been spelled correctly.)"") result[""fit_failed""] = True else: result[""fit_failed""] = False fit_time = time.time() - start_time test_scores = _score(estimator, X_test, y_test, scorer) score_time = time.time() - start_time - fit_time if return_train_score: train_scores = _score(estimator, X_train, y_train, scorer) if verbose > 1: total_time = score_time + fit_time end_msg = f""[CV{progress_msg}] END "" result_msg = params_msg + ("";"" if params_msg else """") if verbose > 2 and isinstance(test_scores, dict): for scorer_name in sorted(test_scores): result_msg += f"" {scorer_name}: ("" if return_train_score: scorer_scores = train_scores[scorer_name] result_msg += f""train={scorer_scores:.3f}, "" result_msg += f""test={test_scores[scorer_name]:.3f})"" result_msg += f"" total time={logger.short_format_time(total_time)}"" # Right align the result_msg end_msg += ""."" * (80 - len(end_msg) - len(result_msg)) end_msg += result_msg print(end_msg) result[""test_scores""] = test_scores if return_train_score: result[""train_scores""] = train_scores if return_n_test_samples: result[""n_test_samples""] = _num_samples(X_test) if return_times: result[""fit_time""] = fit_time result[""score_time""] = score_time if return_parameters: result[""parameters""] = parameters if return_estimator: result[""estimator""] = estimator return result ","def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, return_estimator=False, split_progress=None, candidate_progress=None, error_score=np.nan): """"""Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. scorer : A single callable or dict mapping scorer name to the callable If it is a single callable, the return value for ``train_scores`` and ``test_scores`` is a single float. For a dict, it should be one mapping the scorer name to the scorer callable object / function. The callable object / fn should have signature ``scorer(estimator, X, y)``. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : bool, default=False Compute and return score on training set. return_parameters : bool, default=False Return parameters that has been used for the estimator. split_progress : list or tuple, default=None A list or tuple of format (, ). candidate_progress : {list, tuple} of int, default=None A list or tuple of format (, ). return_n_test_samples : bool, default=False Whether to return the ``n_test_samples``. return_times : bool, default=False Whether to return the fit/score times. 
return_estimator : bool, default=False Whether to return the fitted estimator. Returns ------- result : dict with the following attributes train_scores : dict of scorer name -> float Score on training set (for all the scorers), returned only if `return_train_score` is `True`. test_scores : dict of scorer name -> float Score on testing set (for all the scorers). n_test_samples : int Number of test samples. fit_time : float Time spent for fitting in seconds. score_time : float Time spent for scoring in seconds. parameters : dict or None The parameters that have been evaluated. estimator : estimator object The fitted estimator. fit_failed : bool The estimator failed to fit. """""" progress_msg = """" if verbose > 2: if split_progress is not None: progress_msg = f"" {split_progress[0]+1}/{split_progress[1]}"" if candidate_progress and verbose > 9: progress_msg += (f""; {candidate_progress[0]+1}/"" f""{candidate_progress[1]}"") if verbose > 1: if parameters is None: params_msg = '' else: sorted_keys = sorted(parameters) # Ensure deterministic o/p params_msg = (', '.join(f'{k}={parameters[k]}' for k in sorted_keys)) if verbose > 9: start_msg = f""[CV{progress_msg}] START {params_msg}"" print(f""{start_msg}{(80 - len(start_msg)) * '.'}"") # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) if parameters is not None: # clone after setting parameters in case any parameters # are estimators (like pipeline steps) # because pipeline doesn't clone steps in fit cloned_parameters = {} for k, v in parameters.items(): cloned_parameters[k] = clone(v, safe=False) estimator = estimator.set_params(**cloned_parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) result = {} try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): if isinstance(scorer, dict): test_scores = {name: error_score for name in scorer} if return_train_score: train_scores = test_scores.copy() else: test_scores = error_score if return_train_score: train_scores = error_score warnings.warn(""Estimator fit failed. The score on this train-test"" "" partition for these parameters will be set to %f. "" ""Details: \n%s"" % (error_score, format_exc()), FitFailedWarning) else: raise ValueError(""error_score must be the string 'raise' or a"" "" numeric value. 
(Hint: if using 'raise', please"" "" make sure that it has been spelled correctly.)"") result[""fit_failed""] = True else: result[""fit_failed""] = False fit_time = time.time() - start_time test_scores = _score(estimator, X_test, y_test, scorer) score_time = time.time() - start_time - fit_time if return_train_score: train_scores = _score(estimator, X_train, y_train, scorer) if verbose > 1: total_time = score_time + fit_time end_msg = f""[CV{progress_msg}] END "" result_msg = params_msg + ("";"" if params_msg else """") if verbose > 2 and isinstance(test_scores, dict): for scorer_name in sorted(test_scores): result_msg += f"" {scorer_name}: ("" if return_train_score: scorer_scores = train_scores[scorer_name] result_msg += f""train={scorer_scores:.3f}, "" result_msg += f""test={test_scores[scorer_name]:.3f})"" result_msg += f"" total time={logger.short_format_time(total_time)}"" # Right align the result_msg end_msg += ""."" * (80 - len(end_msg) - len(result_msg)) end_msg += result_msg print(end_msg) result[""test_scores""] = test_scores if return_train_score: result[""train_scores""] = train_scores if return_n_test_samples: result[""n_test_samples""] = _num_samples(X_test) if return_times: result[""fit_time""] = fit_time result[""score_time""] = score_time if return_parameters: result[""parameters""] = parameters if return_estimator: result[""estimator""] = estimator return result " 31493,"def build_multiple_text_options_output(**kwargs) -> Tuple[None, str]: """""" When there's no need to build output from the response but output text is based on command's action requsted this function will take the text based on the action and will return it Args: **kwargs: command_data (Dict): data about a specific command from the general commands_data dict Returns: Tuple containing a List of dictionaries parsed from the original response and a markdown table generated from the parsed response """""" command_data = kwargs['command_data'] action = commands_data['args_values']['action'] readable_output = command_data['output_texts'][action] return None, readable_output ","def build_multiple_text_options_output(**kwargs) -> Tuple[None, str]: """""" When there's no need to build output from the response but output text is based on command's action requested this function will take the text based on the action and will return it Args: **kwargs: command_data (Dict): data about a specific command from the general commands_data dict Returns: Tuple containing a List of dictionaries parsed from the original response and a markdown table generated from the parsed response """""" command_data = kwargs['command_data'] action = commands_data['args_values']['action'] readable_output = command_data['output_texts'][action] return None, readable_output " 30820,"def enablee_disable_user_command(client, args, is_active): """""" Enable user by setting active = true in Atlassian API , if Connection to the service is successful. 
Args: demisto command line argument client: Atlassian API Returns: success : success=True, id as iden, active status fail : success=False, id as iden, errorCod, errorMessage, details """""" scim = verify_and_load_scim_data(args.get('scim')) scim_flat_data = map_scim(scim) user_id = scim_flat_data.get('id') if not user_id: raise Exception('You must provide either the id of the user') data = {""Operations"": [{""op"": ""replace"", ""value"": {""active"": is_active}}]} res = client.enable_disable_user(user_id, data) if res.status_code == 200: msg = USER_ENABLED if is_active else USER_DISABLED generic_iam_context = OutputContext(success=True, iden=user_id, details=msg, active=is_active) else: generic_iam_context = OutputContext(success=False, iden=user_id, errorCode=res.status_code, errorMessage=res.json().get('detail'), details=res.json()) generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)' outputs = { generic_iam_context_dt: generic_iam_context.data } name = 'Enable Atlassian User:' if is_active else 'Disable Atlassian User:' readable_output = tableToMarkdown(name=name, t=generic_iam_context.data, headers=[""brand"", ""instanceName"", ""success"", ""active"", ""id"", ""username"", ""email"", ""errorCode"", ""errorMessage"", ""details""], removeNull=True) return ( readable_output, outputs, generic_iam_context.data ) ","def enable_disable_user_command(client, args, is_active): """""" Enable user by setting active = true in Atlassian API , if Connection to the service is successful. Args: demisto command line argument client: Atlassian API Returns: success : success=True, id as iden, active status fail : success=False, id as iden, errorCod, errorMessage, details """""" scim = verify_and_load_scim_data(args.get('scim')) scim_flat_data = map_scim(scim) user_id = scim_flat_data.get('id') if not user_id: raise Exception('You must provide either the id of the user') data = {""Operations"": [{""op"": ""replace"", ""value"": {""active"": is_active}}]} res = client.enable_disable_user(user_id, data) if res.status_code == 200: msg = USER_ENABLED if is_active else USER_DISABLED generic_iam_context = OutputContext(success=True, iden=user_id, details=msg, active=is_active) else: generic_iam_context = OutputContext(success=False, iden=user_id, errorCode=res.status_code, errorMessage=res.json().get('detail'), details=res.json()) generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)' outputs = { generic_iam_context_dt: generic_iam_context.data } name = 'Enable Atlassian User:' if is_active else 'Disable Atlassian User:' readable_output = tableToMarkdown(name=name, t=generic_iam_context.data, headers=[""brand"", ""instanceName"", ""success"", ""active"", ""id"", ""username"", ""email"", ""errorCode"", ""errorMessage"", ""details""], removeNull=True) return ( readable_output, outputs, generic_iam_context.data ) " 24852,"def my_func(self, doc_type): # [missing-return-doc, missing-return-type-doc] """"""This is a docstring. Parameters: doc_type (str): Google """""" return False ","def my_func(self, doc_type): # [missing-return-doc, missing-return-type-doc] """"""warn_missing_google_returns Parameters: doc_type (str): Google """""" return False " 2932,"def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False, sharey=False, figsize=None, layout=None, bins=10, **kwds): """""" Make a histogram of the DataFrame's. 
A `histogram`_ is a representation of the distribution of data. This function calls :meth:`matplotlib.pyplot.hist`, on each series in the DataFrame, resulting in one histogram per column. .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- data : DataFrame The pandas object holding the data. column : string or sequence If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. For example, a value of 90 displays the x labels rotated 90 degrees clockwise. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. For example, a value of 90 displays the y labels rotated 90 degrees clockwise. ax : Matplotlib axes object, default None The axes to plot the histogram on. sharex : bool, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in. Note that passing in both an ax and sharex=True will alter all x axis labels for all subplots in a figure. sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible. figsize : tuple The size in inches of the figure to create. Uses the value in `matplotlib.rcParams` by default. layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms. bins : integer or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. **kwds All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. Returns ------- matplotlib.AxesSubplot or numpy.ndarray of them See Also -------- matplotlib.pyplot.hist : Plot a histogram using matplotlib. Examples -------- .. plot:: :context: close-figs This example draws a histogram based on the length and width of some animals, displayed in three bins >>> df = pd.DataFrame({ ... 'length': [1.5, 0.5, 1.2, 0.9, 3], ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1] ... 
}, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) """""" _raise_if_no_mpl() _converter._WARN = False if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, layout=layout, bins=bins, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, **kwds) return axes if column is not None: if not isinstance(column, (list, np.ndarray, ABCIndexClass)): column = [column] data = data[column] data = data._get_numeric_data() naxes = len(data.columns) if naxes == 0: raise ValueError(""hist method requires numerical columns, "" ""nothing to plot."") fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout) _axes = _flatten(axes) for i, col in enumerate(com.try_sort(data.columns)): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) ax.grid(grid) _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot) fig.subplots_adjust(wspace=0.3, hspace=0.3) return axes ","def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False, sharey=False, figsize=None, layout=None, bins=10, **kwds): """""" Make a histogram of the DataFrame's. A `histogram`_ is a representation of the distribution of data. This function calls :meth:`matplotlib.pyplot.hist`, on each series in the DataFrame, resulting in one histogram per column. .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- data : DataFrame The pandas object holding the data. column : string or sequence If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. For example, a value of 90 displays the x labels rotated 90 degrees clockwise. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. For example, a value of 90 displays the y labels rotated 90 degrees clockwise. ax : Matplotlib axes object, default None The axes to plot the histogram on. sharex : bool, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in. Note that passing in both an ax and sharex=True will alter all x axis labels for all subplots in a figure. sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible. figsize : tuple The size in inches of the figure to create. Uses the value in `matplotlib.rcParams` by default. layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms. bins : integer or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. **kwds All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. Returns ------- matplotlib.AxesSubplot or numpy.ndarray of them See Also -------- matplotlib.pyplot.hist : Plot a histogram using matplotlib. Examples -------- .. 
plot:: :context: close-figs This example draws a histogram based on the length and width of some animals, displayed in three bins >>> df = pd.DataFrame({ ... 'length': [1.5, 0.5, 1.2, 0.9, 3], ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1] ... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) """""" _raise_if_no_mpl() _converter._WARN = False if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, layout=layout, bins=bins, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, **kwds) return axes if column is not None: if not isinstance(column, (list, np.ndarray, ABCIndexClass)): column = [column] data = data[column] data = data._get_numeric_data() naxes = len(data.columns) if not naxes: raise ValueError(""hist method requires numerical columns, "" ""nothing to plot."") fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout) _axes = _flatten(axes) for i, col in enumerate(com.try_sort(data.columns)): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) ax.grid(grid) _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot) fig.subplots_adjust(wspace=0.3, hspace=0.3) return axes " 37461,"def _validate_meas_map(instruction_map: Dict[Tuple[int, instructions.Acquire], List[instructions.Acquire]], meas_map: List[List[int]]) -> None: """"""Validate all qubits tied in ``meas_map`` are to be acquired. Args: instruction_map: A dictionary grouping Acquire instructions according to their start time and duration. meas_map: List of groups of qubits that must be acquired together. Raises: QiskitError: If the instructions do not satisfy the measurement map. """""" sorted_inst_map = sorted(instruction_map.items(), key=lambda item: item[0]) meas_map_sets = [set(m) for m in meas_map] # if there is time overlap: # - if the overlap is in the same meas_map -- Raise Error for idx, inst in enumerate(sorted_inst_map[:-1]): inst_end_time = inst[0][0] + inst[0][1] next_inst = sorted_inst_map[idx+1] next_inst_time = next_inst[0][0] if next_inst_time < inst_end_time: inst_qubits = {inst.channel.index for inst in inst[1]} next_inst_qubits = {inst.channel.index for inst in next_inst[1]} for meas_set in meas_map_sets: common_instr_qubits = inst_qubits.intersection(meas_set) common_next = next_inst_qubits.intersection(meas_set) if common_instr_qubits and common_next: raise QiskitError('Qubits {} and {} are in the same measurement grouping: {}. ' 'They must either be acquired at the same time, or disjointly' '. Instead, they were acquired at times: {}-{} and ' '{}-{}'.format(common_instr_qubits, common_next, meas_map, inst[0][0], inst_end_time, next_inst_time, next_inst_time + next_inst[0][1])) ","def _validate_meas_map(instruction_map: Dict[Tuple[int, instructions.Acquire], List[instructions.Acquire]], meas_map: List[List[int]]) -> None: """"""Validate all qubits tied in ``meas_map`` are to be acquired. Args: instruction_map: A dictionary grouping Acquire instructions according to their start time and duration. meas_map: List of groups of qubits that must be acquired together. Raises: QiskitError: If the instructions do not satisfy the measurement map. 
"""""" sorted_inst_map = sorted(instruction_map.items(), key=lambda item: item[0]) meas_map_sets = [set(m) for m in meas_map] # error if there is time overlap between qubits in the same meas_map for idx, inst in enumerate(sorted_inst_map[:-1]): inst_end_time = inst[0][0] + inst[0][1] next_inst = sorted_inst_map[idx+1] next_inst_time = next_inst[0][0] if next_inst_time < inst_end_time: inst_qubits = {inst.channel.index for inst in inst[1]} next_inst_qubits = {inst.channel.index for inst in next_inst[1]} for meas_set in meas_map_sets: common_instr_qubits = inst_qubits.intersection(meas_set) common_next = next_inst_qubits.intersection(meas_set) if common_instr_qubits and common_next: raise QiskitError('Qubits {} and {} are in the same measurement grouping: {}. ' 'They must either be acquired at the same time, or disjointly' '. Instead, they were acquired at times: {}-{} and ' '{}-{}'.format(common_instr_qubits, common_next, meas_map, inst[0][0], inst_end_time, next_inst_time, next_inst_time + next_inst[0][1])) " 24753,"def _has_different_parameters( original: List[astroid.AssignName], overridden: List[astroid.AssignName], dummy_parameter_regex: Pattern, counter: int, ): result = [] zipped = zip_longest(original, overridden) for original_param, overridden_param in zipped: params = (original_param, overridden_param) if not all(params): return [""Number of parameters has changed in""] # check for the arguments' type original_type = original_param.parent.annotations[counter] if original_type is not None: original_type = str(original_param.parent.annotations[counter].name) overridden_type = overridden_param.parent.annotations[counter] if overridden_type is not None: overridden_type = str(overridden_param.parent.annotations[counter].name) if original_type != overridden_type: result.append( ""Parameter '"" + str(original_param.name) + ""' was of type '"" + original_type + ""' and is now of type '"" + overridden_type + ""' in"" ) counter += 1 # check for the arguments' name names = [param.name for param in params] if any(dummy_parameter_regex.match(name) for name in names): continue if original_param.name != overridden_param.name: result.append( ""Parameter '"" + str(original_param.name) + ""' has been renamed in"" ) return result ","def _has_different_parameters( original: List[astroid.AssignName], overridden: List[astroid.AssignName], dummy_parameter_regex: Pattern, counter: int, ) -> List[str]: result = [] zipped = zip_longest(original, overridden) for original_param, overridden_param in zipped: params = (original_param, overridden_param) if not all(params): return [""Number of parameters has changed in""] # check for the arguments' type original_type = original_param.parent.annotations[counter] if original_type is not None: original_type = str(original_param.parent.annotations[counter].name) overridden_type = overridden_param.parent.annotations[counter] if overridden_type is not None: overridden_type = str(overridden_param.parent.annotations[counter].name) if original_type != overridden_type: result.append( ""Parameter '"" + str(original_param.name) + ""' was of type '"" + original_type + ""' and is now of type '"" + overridden_type + ""' in"" ) counter += 1 # check for the arguments' name names = [param.name for param in params] if any(dummy_parameter_regex.match(name) for name in names): continue if original_param.name != overridden_param.name: result.append( ""Parameter '"" + str(original_param.name) + ""' has been renamed in"" ) return result " 39375,"def merge( datasets, merge_points=True, 
main_has_priority=True, progress_bar=False ): """"""Merge several datasets. .. note:: The behavior of this filter varies from the :func:`PolyDataFilters.boolean_union` filter. This filter does not attempt to create a manifold mesh and will include internal surfaces when two meshes overlap. datasets : sequence of :class:`pyvista.Dataset` Sequence of datasets. Can be of any :class:`pyvista.Dataset` merge_points : bool, optional Merge equivalent points when ``True``. Defaults to ``True``. main_has_priority : bool, optional When this parameter is ``True`` and ``merge_points=True``, the arrays of the merging grids will be overwritten by the original main mesh. main_has_priority : bool, optional When this parameter is ``True`` and ``merge_points=True``, the arrays of the merging grids will be overwritten by the original main mesh. progress_bar : bool, optional Display a progress bar to indicate progress. Returns ------- pyvista.DataSet :class:`pyvista.PolyData` if all items in datasets are :class:`pyvista.PolyData`, otherwise returns a :class:`pyvista.UnstructuredGrid`. Examples -------- Merge two polydata datasets. >>> import pyvista >>> sphere = pyvista.Sphere(center=(0, 0, 1)) >>> cube = pyvista.Cube() >>> mesh = pyvista.merge([cube, sphere]) >>> mesh.plot() """""" if not isinstance(datasets, collections.Sequence): raise TypeError(f""Expected a sequence, got {type(datasets)}"") if len(datasets) < 1: raise ValueError(""Expected at least one dataset."") first = datasets[0] if not isinstance(first, pyvista.DataSet): raise TypeError(f""Expected pyvista.DataSet, not {type(first)}"") return datasets[0].merge( datasets[1:], merge_points=merge_points, main_has_priority=main_has_priority, progress_bar=progress_bar, ) ","def merge( datasets, merge_points=True, main_has_priority=True, progress_bar=False, ): """"""Merge several datasets. .. note:: The behavior of this filter varies from the :func:`PolyDataFilters.boolean_union` filter. This filter does not attempt to create a manifold mesh and will include internal surfaces when two meshes overlap. datasets : sequence of :class:`pyvista.Dataset` Sequence of datasets. Can be of any :class:`pyvista.Dataset` merge_points : bool, optional Merge equivalent points when ``True``. Defaults to ``True``. main_has_priority : bool, optional When this parameter is ``True`` and ``merge_points=True``, the arrays of the merging grids will be overwritten by the original main mesh. main_has_priority : bool, optional When this parameter is ``True`` and ``merge_points=True``, the arrays of the merging grids will be overwritten by the original main mesh. progress_bar : bool, optional Display a progress bar to indicate progress. Returns ------- pyvista.DataSet :class:`pyvista.PolyData` if all items in datasets are :class:`pyvista.PolyData`, otherwise returns a :class:`pyvista.UnstructuredGrid`. Examples -------- Merge two polydata datasets. 
>>> import pyvista >>> sphere = pyvista.Sphere(center=(0, 0, 1)) >>> cube = pyvista.Cube() >>> mesh = pyvista.merge([cube, sphere]) >>> mesh.plot() """""" if not isinstance(datasets, collections.Sequence): raise TypeError(f""Expected a sequence, got {type(datasets)}"") if len(datasets) < 1: raise ValueError(""Expected at least one dataset."") first = datasets[0] if not isinstance(first, pyvista.DataSet): raise TypeError(f""Expected pyvista.DataSet, not {type(first)}"") return datasets[0].merge( datasets[1:], merge_points=merge_points, main_has_priority=main_has_priority, progress_bar=progress_bar, ) " 54821,"def prob(samples: list, excited_state: list) -> float: r""""""Generate probability of observing a Fock state. **Example usage:** >>> excited_state = [0, 2] >>> samples = [[0, 2], [1, 1], [0, 2], [2, 0], [1, 1], [0, 2], [1, 1], [1, 1], [1, 1], [0, 2]] >>> prob(samples, excited_state) 0.4 Args: samples list[list[int]]: a list of samples excited_state (list): a Fock state Returns: float: probability of observing a Fock state in the given samples """""" if len(samples) == 0: raise ValueError(""The samples list must not be empty"") if len(excited_state) == 0: raise ValueError(""The excited state list must not be empty"") if not len(excited_state) == len(samples[0]): raise ValueError(""The number of modes in the samples and the excited state must be equal"") if np.any(np.array(excited_state) < 0): raise ValueError(""The excited state must not contain negative values"") return samples.count(excited_state) / len(samples) ","def prob(samples: list, excited_state: list) -> float: r""""""Generate probability of observing a Fock state. **Example usage:** >>> excited_state = [0, 2] >>> samples = [[0, 2], [1, 1], [0, 2], [2, 0], [1, 1], [0, 2], [1, 1], [1, 1], [1, 1]] >>> prob(samples, excited_state) 0.3333333333333333 Args: samples list[list[int]]: a list of samples excited_state (list): a Fock state Returns: float: probability of observing a Fock state in the given samples """""" if len(samples) == 0: raise ValueError(""The samples list must not be empty"") if len(excited_state) == 0: raise ValueError(""The excited state list must not be empty"") if not len(excited_state) == len(samples[0]): raise ValueError(""The number of modes in the samples and the excited state must be equal"") if np.any(np.array(excited_state) < 0): raise ValueError(""The excited state must not contain negative values"") return samples.count(excited_state) / len(samples) " 26084,"def _dict_equal(d1, d2): """"""Check that two dictionaries are equal. Nested dictionaries are walked recursively. Array type values are compared approximately, other types are checked for identity. """""" if not (isinstance(d1, dict) and isinstance(d2, dict)): return False if not d1.keys() == d2.keys(): return False for key in d1.keys(): if isinstance(d1[key], dict) and isinstance(d2[key], dict): return _dict_equal(d1[key], d2[key]) value_pair = [d1[key], d2[key]] if _contain_arrays(value_pair): return _all_arrays_equal(value_pair) return _all_values_equal(value_pair) ","def _dict_equal(d1, d2): """"""Check that two dictionaries are equal. Nested dictionaries are walked recursively. Array type values are compared approximately, other types are checked for identity. 
"""""" if not (isinstance(d1, dict) and isinstance(d2, dict)): return False if d1.keys() != d2.keys(): return False for key in d1.keys(): if isinstance(d1[key], dict) and isinstance(d2[key], dict): return _dict_equal(d1[key], d2[key]) value_pair = [d1[key], d2[key]] if _contain_arrays(value_pair): return _all_arrays_equal(value_pair) return _all_values_equal(value_pair) " 49886,"def get_sky_diffuse(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, model='isotropic', model_perez='allsitescomposite1990'): r"""""" Determine in-plane sky diffuse irradiance component using the specified sky diffuse irradiance model. Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- poa_sky_diffuse : numeric Sky diffuse irradiance in the plane of array. [W/m2] Raises ------ ValueError If model is one of 'haydavies', 'reindl', or 'perez' and dni_extra is None. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. """""" model = model.lower() if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None): raise ValueError(f'dni_extra is required for model {model}') if model == 'isotropic': sky = isotropic(surface_tilt, dhi) elif model == 'klucher': sky = klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth) elif model == 'haydavies': sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth) elif model == 'reindl': sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra, solar_zenith, solar_azimuth) elif model == 'king': sky = king(surface_tilt, dhi, ghi, solar_zenith) elif model == 'perez': if airmass is None: airmass = atmosphere.get_relative_airmass(solar_zenith) sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, airmass, model=model_perez) else: raise ValueError(f'invalid model selection {model}') return sky ","def get_sky_diffuse(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, model='isotropic', model_perez='allsitescomposite1990'): r"""""" Determine in-plane sky diffuse irradiance component using the specified sky diffuse irradiance model. 
Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- poa_sky_diffuse : numeric Sky diffuse irradiance in the plane of array. [W/m2] Raises ------ ValueError If model is one of ``'haydavies'``, ``'reindl'``, or ``'perez'`` and ``dni_extra`` is ``None``. Notes ----- Models ``'haydavies'``, ``'reindl'``, or ``'perez'`` require ``dni_extra``. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The ``'perez'`` model requires relative airmass (``airmass``) as input. If ``airmass`` is not provided, it is calculated using the defaults in :py:func:`~pvlib.atmosphere.get_relative_airmass`. """""" model = model.lower() if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None): raise ValueError(f'dni_extra is required for model {model}') if model == 'isotropic': sky = isotropic(surface_tilt, dhi) elif model == 'klucher': sky = klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth) elif model == 'haydavies': sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth) elif model == 'reindl': sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra, solar_zenith, solar_azimuth) elif model == 'king': sky = king(surface_tilt, dhi, ghi, solar_zenith) elif model == 'perez': if airmass is None: airmass = atmosphere.get_relative_airmass(solar_zenith) sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, airmass, model=model_perez) else: raise ValueError(f'invalid model selection {model}') return sky " 28856,"def oauth_url( client_id: Union[int, str], *, permissions: Permissions = MISSING, guild: Snowflake = MISSING, redirect_uri: str = MISSING, scopes: Iterable[str] = MISSING, disable_guild_select: bool = MISSING, ): """"""A helper function that returns the OAuth2 URL for inviting the bot into guilds. Parameters ----------- client_id: Union[:class:`int`, :class:`str`] The client ID for your bot. permissions: :class:`~discord.Permissions` The permissions you're requesting. If not given then you won't be requesting any permissions. guild: :class:`~discord.abc.Snowflake` The guild to pre-select in the authorization screen, if available. redirect_uri: :class:`str` An optional valid redirect URI. scopes: Iterable[:class:`str`] An optional valid list of scopes. Defaults to ``('bot',)``. .. versionadded:: 1.7 disable_guild_select: :class:`bool` Whether to disallow the user from changing the guild dropdown. .. versionadded:: 2.0 Returns -------- :class:`str` The OAuth2 URL for inviting the bot into guilds. 
"""""" url = f'https://discord.com/oauth2/authorize?client_id={client_id}' url += '&scope=' + '+'.join(scopes if scopes is not MISSING else ('bot',)) if permissions is not MISSING: url += f'&permissions={permissions.value}' if guild is not MISSING: url += f'&guild_id={guild.id}' if redirect_uri is not MISSING: from urllib.parse import urlencode url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri}) if disable_guild_select is not MISSING: url += '&disable_guild_select=' url += 'true' if disable_guild_select else 'false' return url ","def oauth_url( client_id: Union[int, str], *, permissions: Permissions = MISSING, guild: Snowflake = MISSING, redirect_uri: str = MISSING, scopes: Iterable[str] = MISSING, disable_guild_select: bool = MISSING, ): """"""A helper function that returns the OAuth2 URL for inviting the bot into guilds. Parameters ----------- client_id: Union[:class:`int`, :class:`str`] The client ID for your bot. permissions: :class:`~discord.Permissions` The permissions you're requesting. If not given then you won't be requesting any permissions. guild: :class:`~discord.abc.Snowflake` The guild to pre-select in the authorization screen, if available. redirect_uri: :class:`str` An optional valid redirect URI. scopes: Iterable[:class:`str`] An optional valid list of scopes. Defaults to ``('bot',)``. .. versionadded:: 1.7 disable_guild_select: :class:`bool` Whether to disallow the user from changing the guild dropdown. .. versionadded:: 2.0 Returns -------- :class:`str` The OAuth2 URL for inviting the bot into guilds. """""" url = f'https://discord.com/oauth2/authorize?client_id={client_id}' url += '&scope=' + '+'.join(scopes or ('bot',)) if permissions is not MISSING: url += f'&permissions={permissions.value}' if guild is not MISSING: url += f'&guild_id={guild.id}' if redirect_uri is not MISSING: from urllib.parse import urlencode url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri}) if disable_guild_select is not MISSING: url += '&disable_guild_select=' url += 'true' if disable_guild_select else 'false' return url " 43568,"def Interferometer(theta, phi, varphi, wires, mesh='rectangular', beamsplitter='pennylane'): r""""""General linear interferometer, an array of beamsplitters and phase shifters. For :math:`M` wires, the general interferometer is specified by providing :math:`M(M-1)/2` transmittivity angles :math:`\theta` and the same number of phase angles :math:`\phi`, as well as either :math:`M-1` or :math:`M` additional rotation parameters :math:`\varphi`. For the parametrization of a universal interferometer :math:`M-1` such rotation parameters are sufficient. If :math:`M` rotation parameters are given, the interferometer is over-parametrized, but the resulting circuit is more symmetric, which can be advantageous. By specifying the keyword argument ``mesh``, the scheme used to implement the interferometer may be adjusted: * ``mesh='rectangular'`` (default): uses the scheme described in `Clements et al. `__, resulting in a *rectangular* array of :math:`M(M-1)/2` beamsplitters arranged in :math:`M` slices and ordered from left to right and top to bottom in each slice. The first beamsplitter acts on wires :math:`0` and :math:`1`: .. figure:: ../../_static/clements.png :align: center :width: 30% :target: javascript:void(0); * ``mesh='triangular'``: uses the scheme described in `Reck et al. `__, resulting in a *triangular* array of :math:`M(M-1)/2` beamsplitters arranged in :math:`2M-3` slices and ordered from left to right and top to bottom. 
The first and fourth beamsplitters act on wires :math:`M-1` and :math:`M`, the second on :math:`M-2` and :math:`M-1`, and the third on :math:`M-3` and :math:`M-2`, and so on. .. figure:: ../../_static/reck.png :align: center :width: 30% :target: javascript:void(0); In both schemes, the network of :class:`~pennylane.ops.Beamsplitter` operations is followed by :math:`M` (or :math:`M-1`) local :class:`~pennylane.ops.Rotation` Operations. In the latter case, the rotation on the last wire is left out. The rectangular decomposition is generally advantageous, as it has a lower circuit depth (:math:`M` vs :math:`2M-3`) and optical depth than the triangular decomposition, resulting in reduced optical loss. This is an example of a 4-mode interferometer with beamsplitters :math:`B` and rotations :math:`R`, using ``mesh='rectangular'``: .. figure:: ../../_static/layer_interferometer.png :align: center :width: 60% :target: javascript:void(0); .. note:: The decomposition as formulated in `Clements et al. `__ uses a different convention for a beamsplitter :math:`T(\theta, \phi)` than PennyLane, namely: .. math:: T(\theta, \phi) = BS(\theta, 0) R(\phi) For the universality of the decomposition, the used convention is irrelevant, but for a given set of angles the resulting interferometers will be different. If an interferometer consistent with the convention from `Clements et al. `__ is needed, the optional keyword argument ``beamsplitter='clements'`` can be specified. This will result in each :class:`~pennylane.ops.Beamsplitter` being preceded by a :class:`~pennylane.ops.Rotation` and thus increase the number of elementary operations in the circuit. Args: theta (array): length :math:`M(M-1)/2` array of transmittivity angles :math:`\theta` phi (array): length :math:`M(M-1)/2` array of phase angles :math:`\phi` varphi (array): length :math:`M` or :math:`M-1` array of rotation angles :math:`\varphi` wires (Sequence[int]): wires the interferometer should act on Keyword Args: mesh (string): the type of mesh to use beamsplitter (str): if ``clements``, the beamsplitter convention from Clements et al. 2016 (https://dx.doi.org/10.1364/OPTICA.3.001460) is used; if ``pennylane``, the beamsplitter is implemented via PennyLane's ``Beamsplitter`` operation. 
Raises: QuantumFunctionError: if `beamsplitter` or `mesh` is an instance of :class:`~pennylane.variable.Variable` """""" if isinstance(beamsplitter, Variable): raise QuantumFunctionError(""The beamsplitter parameter influences the "" ""circuit architecture and can not be passed as a QNode parameter."") if isinstance(mesh, Variable): raise QuantumFunctionError(""The mesh parameter influences the circuit architecture "" ""and can not be passed as a QNode parameter."") if not isinstance(wires, Sequence): w = [wires] else: w = wires M = len(w) if M == 1: # the interferometer is a single rotation Rotation(varphi[0], wires=w[0]) return n = 0 # keep track of free parameters if mesh == 'rectangular': # Apply the Clements beamsplitter array # The array depth is N for l in range(M): for k, (w1, w2) in enumerate(zip(w[:-1], w[1:])): #skip even or odd pairs depending on layer if (l+k)%2 != 1: if beamsplitter == 'clements': Rotation(phi[n], wires=[w1]) Beamsplitter(theta[n], 0, wires=[w1, w2]) else: Beamsplitter(theta[n], phi[n], wires=[w1, w2]) n += 1 elif mesh == 'triangular': # apply the Reck beamsplitter array # The array depth is 2*N-3 for l in range(2*M-3): for k in range(abs(l+1-(M-1)), M-1, 2): if beamsplitter == 'clements': Rotation(phi[n], wires=[w[k]]) Beamsplitter(theta[n], 0, wires=[w[k], w[k+1]]) else: Beamsplitter(theta[n], phi[n], wires=[w[k], w[k+1]]) n += 1 # apply the final local phase shifts to all modes for i, p in enumerate(varphi): Rotation(p, wires=[w[i]]) ","def Interferometer(theta, phi, varphi, wires, mesh='rectangular', beamsplitter='pennylane'): r""""""General linear interferometer, an array of beamsplitters and phase shifters. For :math:`M` wires, the general interferometer is specified by providing :math:`M(M-1)/2` transmittivity angles :math:`\theta` and the same number of phase angles :math:`\phi`, as well as either :math:`M-1` or :math:`M` additional rotation parameters :math:`\varphi`. For the parametrization of a universal interferometer :math:`M-1` such rotation parameters are sufficient. If :math:`M` rotation parameters are given, the interferometer is over-parametrized, but the resulting circuit is more symmetric, which can be advantageous. By specifying the keyword argument ``mesh``, the scheme used to implement the interferometer may be adjusted: * ``mesh='rectangular'`` (default): uses the scheme described in `Clements et al. `__, resulting in a *rectangular* array of :math:`M(M-1)/2` beamsplitters arranged in :math:`M` slices and ordered from left to right and top to bottom in each slice. The first beamsplitter acts on wires :math:`0` and :math:`1`: .. figure:: ../../_static/clements.png :align: center :width: 30% :target: javascript:void(0); * ``mesh='triangular'``: uses the scheme described in `Reck et al. `__, resulting in a *triangular* array of :math:`M(M-1)/2` beamsplitters arranged in :math:`2M-3` slices and ordered from left to right and top to bottom. The first and fourth beamsplitters act on wires :math:`M-1` and :math:`M`, the second on :math:`M-2` and :math:`M-1`, and the third on :math:`M-3` and :math:`M-2`, and so on. .. figure:: ../../_static/reck.png :align: center :width: 30% :target: javascript:void(0); In both schemes, the network of :class:`~pennylane.ops.Beamsplitter` operations is followed by :math:`M` (or :math:`M-1`) local :class:`~pennylane.ops.Rotation` Operations. In the latter case, the rotation on the last wire is left out. 
The rectangular decomposition is generally advantageous, as it has a lower circuit depth (:math:`M` vs :math:`2M-3`) and optical depth than the triangular decomposition, resulting in reduced optical loss. This is an example of a 4-mode interferometer with beamsplitters :math:`B` and rotations :math:`R`, using ``mesh='rectangular'``: .. figure:: ../../_static/layer_interferometer.png :align: center :width: 60% :target: javascript:void(0); .. note:: The decomposition as formulated in `Clements et al. `__ uses a different convention for a beamsplitter :math:`T(\theta, \phi)` than PennyLane, namely: .. math:: T(\theta, \phi) = BS(\theta, 0) R(\phi) For the universality of the decomposition, the used convention is irrelevant, but for a given set of angles the resulting interferometers will be different. If an interferometer consistent with the convention from `Clements et al. `__ is needed, the optional keyword argument ``beamsplitter='clements'`` can be specified. This will result in each :class:`~pennylane.ops.Beamsplitter` being preceded by a :class:`~pennylane.ops.Rotation` and thus increase the number of elementary operations in the circuit. Args: theta (array): length :math:`M(M-1)/2` array of transmittivity angles :math:`\theta` phi (array): length :math:`M(M-1)/2` array of phase angles :math:`\phi` varphi (array): length :math:`M` or :math:`M-1` array of rotation angles :math:`\varphi` wires (Sequence[int]): wires the interferometer should act on Keyword Args: mesh (string): the type of mesh to use beamsplitter (str): if ``clements``, the beamsplitter convention from Clements et al. 2016 (https://dx.doi.org/10.1364/OPTICA.3.001460) is used; if ``pennylane``, the beamsplitter is implemented via PennyLane's ``Beamsplitter`` operation. Raises: QuantumFunctionError: if ``beamsplitter`` or ``mesh`` is an instance of :class:`~pennylane.variable.Variable` """""" if isinstance(beamsplitter, Variable): raise QuantumFunctionError(""The beamsplitter parameter influences the "" ""circuit architecture and can not be passed as a QNode parameter."") if isinstance(mesh, Variable): raise QuantumFunctionError(""The mesh parameter influences the circuit architecture "" ""and can not be passed as a QNode parameter."") if not isinstance(wires, Sequence): w = [wires] else: w = wires M = len(w) if M == 1: # the interferometer is a single rotation Rotation(varphi[0], wires=w[0]) return n = 0 # keep track of free parameters if mesh == 'rectangular': # Apply the Clements beamsplitter array # The array depth is N for l in range(M): for k, (w1, w2) in enumerate(zip(w[:-1], w[1:])): #skip even or odd pairs depending on layer if (l+k)%2 != 1: if beamsplitter == 'clements': Rotation(phi[n], wires=[w1]) Beamsplitter(theta[n], 0, wires=[w1, w2]) else: Beamsplitter(theta[n], phi[n], wires=[w1, w2]) n += 1 elif mesh == 'triangular': # apply the Reck beamsplitter array # The array depth is 2*N-3 for l in range(2*M-3): for k in range(abs(l+1-(M-1)), M-1, 2): if beamsplitter == 'clements': Rotation(phi[n], wires=[w[k]]) Beamsplitter(theta[n], 0, wires=[w[k], w[k+1]]) else: Beamsplitter(theta[n], phi[n], wires=[w[k], w[k+1]]) n += 1 # apply the final local phase shifts to all modes for i, p in enumerate(varphi): Rotation(p, wires=[w[i]]) " 24758,"def stripped_lines( lines, ignore_comments, ignore_docstrings, ignore_imports, ignore_signatures ): """"""return lines with leading/trailing whitespace and any ignored code features removed """""" if ignore_imports or ignore_signatures: tree = astroid.parse("""".join(lines)) if 
ignore_imports: node_is_import_by_lineno = ( (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom))) for node in tree.body ) line_begins_import = { lineno: all(is_import for _, is_import in node_is_import_group) for lineno, node_is_import_group in groupby( node_is_import_by_lineno, key=lambda x: x[0] ) } current_line_is_import = False if ignore_signatures: functions = filter( lambda node: isinstance( node, (astroid.FunctionDef, astroid.AsyncFunctionDef) ), tree.body, ) signature_lines = set( chain(*(range(func.fromlineno, func.body[0].lineno) for func in functions)) ) strippedlines = [] docstring = None for lineno, line in enumerate(lines, start=1): line = line.strip() if ignore_docstrings: if not docstring: if line.startswith('""""""') or line.startswith(""'''""): docstring = line[:3] line = line[3:] elif line.startswith('r""""""') or line.startswith(""r'''""): docstring = line[1:4] line = line[4:] if docstring: if line.endswith(docstring): docstring = None line = """" if ignore_imports: current_line_is_import = line_begins_import.get( lineno, current_line_is_import ) if current_line_is_import: line = """" if ignore_comments: line = line.split(""#"", 1)[0].strip() if ignore_signatures and lineno in signature_lines: line = """" strippedlines.append(line) return strippedlines ","def stripped_lines( lines, ignore_comments: bool, ignore_docstrings: bool, ignore_imports: bool, ignore_signatures: bool ): """"""return lines with leading/trailing whitespace and any ignored code features removed """""" if ignore_imports or ignore_signatures: tree = astroid.parse("""".join(lines)) if ignore_imports: node_is_import_by_lineno = ( (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom))) for node in tree.body ) line_begins_import = { lineno: all(is_import for _, is_import in node_is_import_group) for lineno, node_is_import_group in groupby( node_is_import_by_lineno, key=lambda x: x[0] ) } current_line_is_import = False if ignore_signatures: functions = filter( lambda node: isinstance( node, (astroid.FunctionDef, astroid.AsyncFunctionDef) ), tree.body, ) signature_lines = set( chain(*(range(func.fromlineno, func.body[0].lineno) for func in functions)) ) strippedlines = [] docstring = None for lineno, line in enumerate(lines, start=1): line = line.strip() if ignore_docstrings: if not docstring: if line.startswith('""""""') or line.startswith(""'''""): docstring = line[:3] line = line[3:] elif line.startswith('r""""""') or line.startswith(""r'''""): docstring = line[1:4] line = line[4:] if docstring: if line.endswith(docstring): docstring = None line = """" if ignore_imports: current_line_is_import = line_begins_import.get( lineno, current_line_is_import ) if current_line_is_import: line = """" if ignore_comments: line = line.split(""#"", 1)[0].strip() if ignore_signatures and lineno in signature_lines: line = """" strippedlines.append(line) return strippedlines " 45731,"def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """""" Generate a nowcast by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. 
Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list of floats Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool, optional If True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. 
The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast"") print(""-----------------------"") print("""") print(""Inputs"") print(""------"") print(f""input dimensions: {vil.shape[1]}x{vil.shape[2]}"") print("""") print(""Methods"") print(""-------"") print(f""extrapolation: {extrap_method}"") print(f""FFT: {fft_method}"") print("""") print(""Parameters"") print(""----------"") if isinstance(timesteps, int): print(f""number of time steps: {timesteps}"") else: print(f""time steps: {timesteps}"") print(f""parallel threads: {num_workers}"") print(f""number of cascade levels: {n_cascade_levels}"") print(f""order of the ARI(p,1) model: {ar_order}"") if type(ar_window_radius) == int: print(f""ARI(p,1) window radius: {ar_window_radius}"") else: print(""ARI(p,1) window radius: none"") print(f""R(VIL) window radius: {r_vil_window_radius}"") if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 else: rainrate_mask = None if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) else: r_vil_a, r_vil_b = None, None # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) extrap_kwargs[""allow_nonfinite_values""] = ( True if np.any(~np.isfinite(vil)) else False ) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # compute time-lagged correlation coefficients for the cascade levels of # the advected and differenced input fields gamma = 
np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") rainrate_f = [] extrap_kwargs[""return_displacement""] = True state = {""vil_dec"": vil_dec} params = { ""apply_rainrate_mask"": apply_rainrate_mask, ""mask"": mask, ""n_cascade_levels"": n_cascade_levels, ""phi"": phi, ""rainrate"": rainrate, ""rainrate_mask"": rainrate_mask, ""recomp_method"": recomp_method, ""r_vil_a"": r_vil_a, ""r_vil_b"": r_vil_b, } rainrate_f = nowcast_main_loop( vil[-1, :], velocity, state, timesteps, extrap_method, _update, extrap_kwargs=extrap_kwargs, params=params, measure_time=measure_time, ) if measure_time: rainrate_f, mainloop_time = rainrate_f if measure_time: return np.stack(rainrate_f), init_time, mainloop_time else: return np.stack(rainrate_f) ","def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """""" Generate a nowcast by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list of floats Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. 
rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool, optional If True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). 
References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast"") print(""-----------------------"") print("""") print(""Inputs"") print(""------"") print(f""input dimensions: {vil.shape[1]}x{vil.shape[2]}"") print("""") print(""Methods"") print(""-------"") print(f""extrapolation: {extrap_method}"") print(f""FFT: {fft_method}"") print("""") print(""Parameters"") print(""----------"") if isinstance(timesteps, int): print(f""number of time steps: {timesteps}"") else: print(f""time steps: {timesteps}"") print(f""parallel threads: {num_workers}"") print(f""number of cascade levels: {n_cascade_levels}"") print(f""order of the ARI(p,1) model: {ar_order}"") if type(ar_window_radius) == int: print(f""ARI(p,1) window radius: {ar_window_radius}"") else: print(""ARI(p,1) window radius: none"") print(f""R(VIL) window radius: {r_vil_window_radius}"") if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 else: rainrate_mask = None if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression precip_vil_a, precip_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) else: r_vil_a, r_vil_b = None, None # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) extrap_kwargs[""allow_nonfinite_values""] = ( True if np.any(~np.isfinite(vil)) else False ) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # compute time-lagged correlation coefficients for the cascade levels of # the advected and differenced input fields gamma = np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if 
ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") rainrate_f = [] extrap_kwargs[""return_displacement""] = True state = {""vil_dec"": vil_dec} params = { ""apply_rainrate_mask"": apply_rainrate_mask, ""mask"": mask, ""n_cascade_levels"": n_cascade_levels, ""phi"": phi, ""rainrate"": rainrate, ""rainrate_mask"": rainrate_mask, ""recomp_method"": recomp_method, ""r_vil_a"": r_vil_a, ""r_vil_b"": r_vil_b, } rainrate_f = nowcast_main_loop( vil[-1, :], velocity, state, timesteps, extrap_method, _update, extrap_kwargs=extrap_kwargs, params=params, measure_time=measure_time, ) if measure_time: rainrate_f, mainloop_time = rainrate_f if measure_time: return np.stack(rainrate_f), init_time, mainloop_time else: return np.stack(rainrate_f) " 49557,"def histogram(a, bins=None, range=None, normed=False, weights=None, density=None): """""" Blocked variant of :func:`numpy.histogram`. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional Either an iterable specifying the ``bins`` or the number of ``bins`` and a ``range`` argument is required as computing ``min`` and ``max`` over blocked arrays is an expensive operation that must be performed explicitly. If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional .. deprecated:: 1.6.0 This is equivalent to the `density` argument, but produces incorrect results for unequal bin widths. It should not be used. .. versionchanged:: 1.15.0 DeprecationWarnings are actually emitted. weights : array_like, optional A dask.array.Array of weights, of the same block structure as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. 
Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. If ``density`` is True, ``bins`` cannot be a single-number delayed value. It must be a concrete number, or a (possibly-delayed) array/sequence of the bin edges. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. Examples -------- Using number of bins and range: >>> import dask.array as da >>> import numpy as np >>> x = da.from_array(np.arange(10000), chunks=10) >>> h, bins = da.histogram(x, bins=10, range=[0, 10000]) >>> bins array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000., 8000., 9000., 10000.]) >>> h.compute() array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]) Explicitly specifying the bins: >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000])) >>> bins array([ 0, 5000, 10000]) >>> h.compute() array([5000, 5000]) """""" if isinstance(bins, Array): scalar_bins = bins.ndim == 0 # ^ `np.ndim` is not implemented by Dask array. elif isinstance(bins, Delayed): scalar_bins = bins._length is None or bins._length == 1 else: scalar_bins = np.ndim(bins) == 0 if bins is None or (scalar_bins and range is None): raise ValueError( ""dask.array.histogram requires either specifying "" ""bins as an iterable or specifying both a range and "" ""the number of bins"" ) if weights is not None and weights.chunks != a.chunks: raise ValueError(""Input array and weights must have the same chunked structure"") if normed is not False: raise ValueError( ""The normed= keyword argument has been deprecated. "" ""Please use density instead. "" ""See the numpy.histogram docstring for more information."" ) if density and scalar_bins and isinstance(bins, (Array, Delayed)): raise NotImplementedError( ""When `density` is True, `bins` cannot be a scalar Dask object. "" ""It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."" ) for argname, val in [(""bins"", bins), (""range"", range), (""weights"", weights)]: if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins): raise TypeError( ""Dask types besides Array and Delayed are not supported "" ""for `histogram`. 
For argument `{}`, got: {!r}"".format(argname, val) ) if range is not None: try: if len(range) != 2: raise ValueError( f""range must be a sequence or array of length 2, but got {len(range)} items"" ) if isinstance(range, (Array, np.ndarray)) and range.shape != (2,): raise ValueError( f""range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"" ) except TypeError: raise TypeError( f""Expected a sequence or array for range, not {range}"" ) from None token = tokenize(a, bins, range, weights, density) name = ""histogram-sum-"" + token if scalar_bins: bins = _linspace_from_delayed(range[0], range[1], bins + 1) # ^ NOTE `range[1]` is safe because of the above check, and the initial check # that range must not be None if `scalar_bins` else: if not isinstance(bins, (Array, np.ndarray)): bins = asarray(bins) if bins.ndim != 1: raise ValueError( f""bins must be a 1-dimensional array or sequence, got shape {bins.shape}"" ) (bins_ref, range_ref), deps = unpack_collections([bins, range]) # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk if weights is None: dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref) for i, k in enumerate(flatten(a.__dask_keys__())) } dtype = np.histogram([])[0].dtype else: a_keys = flatten(a.__dask_keys__()) w_keys = flatten(weights.__dask_keys__()) dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref, w) for i, (k, w) in enumerate(zip(a_keys, w_keys)) } dtype = weights.dtype deps = (a,) + deps if weights is not None: deps += (weights,) graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps) # Turn graph into a 2D Array of shape (nchunks, nbins) nchunks = len(list(flatten(a.__dask_keys__()))) nbins = bins.size - 1 # since `bins` is 1D chunks = ((1,) * nchunks, (nbins,)) mapped = Array(graph, name, chunks, dtype=dtype) # Sum over chunks to get the final histogram n = mapped.sum(axis=0) # We need to replicate normed and density options from numpy if density is not None: if density: db = asarray(np.diff(bins).astype(float), chunks=n.chunks) return n / db / n.sum(), bins else: return n, bins else: return n, bins ","def histogram(a, bins=None, range=None, normed=False, weights=None, density=None): """""" Blocked variant of :func:`numpy.histogram`. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional Either an iterable specifying the ``bins`` or the number of ``bins`` and a ``range`` argument is required as computing ``min`` and ``max`` over blocked arrays is an expensive operation that must be performed explicitly. If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. 
normed : bool, optional This is equivalent to the ``density`` argument, but produces incorrect results for unequal bin widths. It should not be used. weights : array_like, optional A dask.array.Array of weights, of the same block structure as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. If ``density`` is True, ``bins`` cannot be a single-number delayed value. It must be a concrete number, or a (possibly-delayed) array/sequence of the bin edges. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. Examples -------- Using number of bins and range: >>> import dask.array as da >>> import numpy as np >>> x = da.from_array(np.arange(10000), chunks=10) >>> h, bins = da.histogram(x, bins=10, range=[0, 10000]) >>> bins array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000., 8000., 9000., 10000.]) >>> h.compute() array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]) Explicitly specifying the bins: >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000])) >>> bins array([ 0, 5000, 10000]) >>> h.compute() array([5000, 5000]) """""" if isinstance(bins, Array): scalar_bins = bins.ndim == 0 # ^ `np.ndim` is not implemented by Dask array. elif isinstance(bins, Delayed): scalar_bins = bins._length is None or bins._length == 1 else: scalar_bins = np.ndim(bins) == 0 if bins is None or (scalar_bins and range is None): raise ValueError( ""dask.array.histogram requires either specifying "" ""bins as an iterable or specifying both a range and "" ""the number of bins"" ) if weights is not None and weights.chunks != a.chunks: raise ValueError(""Input array and weights must have the same chunked structure"") if normed is not False: raise ValueError( ""The normed= keyword argument has been deprecated. "" ""Please use density instead. "" ""See the numpy.histogram docstring for more information."" ) if density and scalar_bins and isinstance(bins, (Array, Delayed)): raise NotImplementedError( ""When `density` is True, `bins` cannot be a scalar Dask object. "" ""It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."" ) for argname, val in [(""bins"", bins), (""range"", range), (""weights"", weights)]: if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins): raise TypeError( ""Dask types besides Array and Delayed are not supported "" ""for `histogram`. 
For argument `{}`, got: {!r}"".format(argname, val) ) if range is not None: try: if len(range) != 2: raise ValueError( f""range must be a sequence or array of length 2, but got {len(range)} items"" ) if isinstance(range, (Array, np.ndarray)) and range.shape != (2,): raise ValueError( f""range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"" ) except TypeError: raise TypeError( f""Expected a sequence or array for range, not {range}"" ) from None token = tokenize(a, bins, range, weights, density) name = ""histogram-sum-"" + token if scalar_bins: bins = _linspace_from_delayed(range[0], range[1], bins + 1) # ^ NOTE `range[1]` is safe because of the above check, and the initial check # that range must not be None if `scalar_bins` else: if not isinstance(bins, (Array, np.ndarray)): bins = asarray(bins) if bins.ndim != 1: raise ValueError( f""bins must be a 1-dimensional array or sequence, got shape {bins.shape}"" ) (bins_ref, range_ref), deps = unpack_collections([bins, range]) # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk if weights is None: dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref) for i, k in enumerate(flatten(a.__dask_keys__())) } dtype = np.histogram([])[0].dtype else: a_keys = flatten(a.__dask_keys__()) w_keys = flatten(weights.__dask_keys__()) dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref, w) for i, (k, w) in enumerate(zip(a_keys, w_keys)) } dtype = weights.dtype deps = (a,) + deps if weights is not None: deps += (weights,) graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps) # Turn graph into a 2D Array of shape (nchunks, nbins) nchunks = len(list(flatten(a.__dask_keys__()))) nbins = bins.size - 1 # since `bins` is 1D chunks = ((1,) * nchunks, (nbins,)) mapped = Array(graph, name, chunks, dtype=dtype) # Sum over chunks to get the final histogram n = mapped.sum(axis=0) # We need to replicate normed and density options from numpy if density is not None: if density: db = asarray(np.diff(bins).astype(float), chunks=n.chunks) return n / db / n.sum(), bins else: return n, bins else: return n, bins " 47715,"def test_keyword_anti_before_best(): """"""Test docstring anti pattern before best pattern"""""" for plugin_rules in get_plugin_manager().hook.get_rules(): for rule in plugin_rules: if rule._check_docstring is True: assert rule.__doc__.index(KEYWORD_ANTI) < rule.__doc__.index(KEYWORD_BEST), \ f""{rule.__name__} keyword {KEYWORD_BEST} appear before {KEYWORD_ANTI}"" ","def test_keyword_anti_before_best(): """"""Test docstring anti pattern before best pattern"""""" for plugin_rules in get_plugin_manager().hook.get_rules(): for rule in plugin_rules: if rule._check_docstring is True: assert rule.__doc__.index(KEYWORD_ANTI) < rule.__doc__.index(KEYWORD_BEST), \ f""{rule.__name__} keyword {KEYWORD_BEST} appears before {KEYWORD_ANTI}"" " 34221,"def test_core_models_in_dir(model_directory: Text, stories: Text, output: Text): from rasa.core.test import compare_models_in_dir, plot_core_results loop = asyncio.get_event_loop() loop.run_until_complete(compare_models_in_dir(model_directory, stories, output)) story_n_path = os.path.join(model_directory, NUMBER_OF_TRAINING_STORIES_FILE) number_of_stories = io_utils.read_json_file(story_n_path) plot_core_results(output, number_of_stories) ","def test_core_models_in_directory(model_directory: Text, stories: Text, output: Text): from rasa.core.test import compare_models_in_dir, plot_core_results loop = asyncio.get_event_loop() 
loop.run_until_complete(compare_models_in_dir(model_directory, stories, output)) story_n_path = os.path.join(model_directory, NUMBER_OF_TRAINING_STORIES_FILE) number_of_stories = io_utils.read_json_file(story_n_path) plot_core_results(output, number_of_stories) " 29717,"def _running_process_matches(handle): """"""Check whether the current process is same as of handle's Parameters ---------- handle: ``pyvnml.nvml.LP_struct_c_nvmlDevice_t`` NVML handle to CUDA device Returns ------- out: bool ``True`` if device handle's has a CUDA context on the running process, or ``False`` otherwise. """""" init_once() if hasattr(pynvml, ""nvmlDeviceGetComputeRunningProcesses_v2""): running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle) else: running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle) for proc in running_processes: if os.getpid() == proc.pid: return True return False ","def _running_process_matches(handle): """"""Check whether the current process is same as that of handle Parameters ---------- handle: ``pyvnml.nvml.LP_struct_c_nvmlDevice_t`` NVML handle to CUDA device Returns ------- out: bool ``True`` if device handle's has a CUDA context on the running process, or ``False`` otherwise. """""" init_once() if hasattr(pynvml, ""nvmlDeviceGetComputeRunningProcesses_v2""): running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle) else: running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle) for proc in running_processes: if os.getpid() == proc.pid: return True return False " 5425,"def install( pkgs=None, # pylint: disable=R0912,R0913,R0914 requirements=None, bin_env=None, use_wheel=False, no_use_wheel=False, log=None, proxy=None, timeout=None, editable=None, find_links=None, index_url=None, extra_index_url=None, no_index=False, mirrors=None, build=None, target=None, download=None, download_cache=None, source=None, upgrade=False, force_reinstall=False, ignore_installed=False, exists_action=None, no_deps=False, no_install=False, no_download=False, global_options=None, install_options=None, user=None, cwd=None, pre_releases=False, cert=None, allow_all_external=False, allow_external=None, allow_unverified=None, process_dependency_links=False, saltenv=""base"", env_vars=None, use_vt=False, trusted_host=None, no_cache_dir=False, extra_args=None, cache_dir=None, no_binary=None, disable_version_check=False, freeze_command=None, **kwargs ): """""" Install packages with pip Install packages individually or from a pip requirements file. Install packages globally or to a virtualenv. pkgs Comma separated list of packages to install requirements Path to requirements bin_env Path to pip (or to a virtualenv). This can be used to specify the path to the pip to use when more than one Python release is installed (e.g. ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is specified, it is assumed to be a virtualenv. .. note:: For Windows, if the pip module is being used to upgrade the pip package, bin_env should be the path to the virtualenv or to the python binary that should be used. The pip command is unable to upgrade itself in Windows. 
use_wheel Prefer wheel archives (requires pip>=1.4) no_use_wheel Force to not use wheel archives (requires pip>=1.4,<10.0.0) no_binary Force to not use binary packages (requires pip >= 7.0.0) Accepts either :all: to disable all binary packages, :none: to empty the set, or one or more package names with commas between them log Log file where a complete (maximum verbosity) record will be kept proxy Specify a proxy in the form ``user:passwd@proxy.server:port``. Note that the ``user:password@`` is optional and required only if you are behind an authenticated proxy. If you provide ``user@proxy.server:port`` then you will be prompted for a password. .. note:: If the Minion has a globaly configured proxy - it will be used even if no proxy was set here. To explicitly disable proxy for pip you should pass ``False`` as a value. timeout Set the socket timeout (default 15 seconds) editable install something editable (e.g. ``git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed``) find_links URL to search for packages index_url Base URL of Python Package Index extra_index_url Extra URLs of package indexes to use in addition to ``index_url`` no_index Ignore package index mirrors Specific mirror URL(s) to query (automatically adds --use-mirrors) .. warning:: This option has been deprecated and removed in pip version 7.0.0. Please use ``index_url`` and/or ``extra_index_url`` instead. build Unpack packages into ``build`` dir target Install packages into ``target`` dir download Download packages into ``download`` instead of installing them download_cache | cache_dir Cache downloaded packages in ``download_cache`` or ``cache_dir`` dir source Check out ``editable`` packages into ``source`` dir upgrade Upgrade all packages to the newest available version force_reinstall When upgrading, reinstall all packages even if they are already up-to-date. ignore_installed Ignore the installed packages (reinstalling instead) exists_action Default action when a path already exists: (s)witch, (i)gnore, (w)ipe, (b)ackup no_deps Ignore package dependencies no_install Download and unpack all packages, but don't actually install them no_download Don't download any packages, just install the ones already downloaded (completes an install run with ``--no-install``) install_options Extra arguments to be supplied to the setup.py install command (e.g. like ``--install-option='--install-scripts=/usr/local/bin'``). Use multiple --install-option options to pass multiple options to setup.py install. If you are using an option with a directory path, be sure to use absolute path. global_options Extra global options to be supplied to the setup.py call before the install command. user The user under which to run pip cwd Directory from which to run pip pre_releases Include pre-releases in the available versions cert Provide a path to an alternate CA bundle allow_all_external Allow the installation of all externally hosted files allow_external Allow the installation of externally hosted files (comma separated list) allow_unverified Allow the installation of insecure and unverifiable files (comma separated list) process_dependency_links Enable the processing of dependency links env_vars Set environment variables that some builds will depend on. For example, a Python C-module may have a Makefile that needs INCLUDE_PATH set to pick up a header file while compiling. This must be in the form of a dictionary or a mapping. Example: .. 
code-block:: bash salt '*' pip.install django_app env_vars=""{'CUSTOM_PATH': '/opt/django_app'}"" trusted_host Mark this host as trusted, even though it does not have valid or any HTTPS. use_vt Use VT terminal emulation (see output while installing) no_cache_dir Disable the cache. extra_args pip keyword and positional arguments not yet implemented in salt .. code-block:: yaml salt '*' pip.install pandas extra_args=""[{'--latest-pip-kwarg':'param'}, '--latest-pip-arg']"" .. warning:: If unsupported options are passed here that are not supported in a minion's version of pip, a `No such option error` will be thrown. freeze_command Command used to get list of python packages. In Conda environment this should be something like `/opt/conda/bin/pip3 list --export` instead of default `pip freeze` Will be translated into the following pip command: .. code-block:: bash pip install pandas --latest-pip-kwarg param --latest-pip-arg disable_version_check Pip may periodically check PyPI to determine whether a new version of pip is available to download. Passing True for this option disables that check. CLI Example: .. code-block:: bash salt '*' pip.install , salt '*' pip.install requirements=/path/to/requirements.txt salt '*' pip.install bin_env=/path/to/virtualenv salt '*' pip.install bin_env=/path/to/pip_bin Complicated CLI Example: .. code-block:: bash salt '*' pip.install markdown,django \ editable=git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed upgrade=True no_deps=True """""" cwd = _pip_bin_env(cwd, bin_env) cmd = _get_pip_bin(bin_env) cmd.append(""install"") cleanup_requirements, error = _process_requirements( requirements=requirements, cmd=cmd, cwd=cwd, saltenv=saltenv, user=user ) if error: return error cur_version = version(bin_env, cwd, user=user) if use_wheel: min_version = ""1.4"" max_version = ""9.0.3"" too_low = salt.utils.versions.compare( ver1=cur_version, oper=""<"", ver2=min_version ) too_high = salt.utils.versions.compare( ver1=cur_version, oper="">"", ver2=max_version ) if too_low or too_high: logger.error( ""The --use-wheel option is only supported in pip between %s and "" ""%s. The version of pip detected is %s. This option "" ""will be ignored."", min_version, max_version, cur_version, ) else: cmd.append(""--use-wheel"") if no_use_wheel: min_version = ""1.4"" max_version = ""9.0.3"" too_low = salt.utils.versions.compare( ver1=cur_version, oper=""<"", ver2=min_version ) too_high = salt.utils.versions.compare( ver1=cur_version, oper="">"", ver2=max_version ) if too_low or too_high: logger.error( ""The --no-use-wheel option is only supported in pip between %s and "" ""%s. The version of pip detected is %s. This option "" ""will be ignored."", min_version, max_version, cur_version, ) else: cmd.append(""--no-use-wheel"") if no_binary: min_version = ""7.0.0"" too_low = salt.utils.versions.compare( ver1=cur_version, oper=""<"", ver2=min_version ) if too_low: logger.error( ""The --no-binary option is only supported in pip %s and "" ""newer. The version of pip detected is %s. This option "" ""will be ignored."", min_version, cur_version, ) else: if isinstance(no_binary, list): no_binary = "","".join(no_binary) cmd.extend([""--no-binary"", no_binary]) if log: if os.path.isdir(log): raise OSError(""'{}' is a directory. 
Use --log path_to_file"".format(log)) elif not os.access(log, os.W_OK): raise OSError(""'{}' is not writeable"".format(log)) cmd.extend([""--log"", log]) config = __opts__ if proxy: cmd.extend([""--proxy"", proxy]) # If proxy arg is set to False we won't use the global proxy even if it's set. elif proxy is not False and config.get(""proxy_host"") and config.get(""proxy_port""): if config.get(""proxy_username"") and config.get(""proxy_password""): http_proxy_url = ""http://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"".format( **config ) else: http_proxy_url = ""http://{proxy_host}:{proxy_port}"".format(**config) cmd.extend([""--proxy"", http_proxy_url]) if timeout: try: if isinstance(timeout, float): # Catch floating point input, exception will be caught in # exception class below. raise ValueError(""Timeout cannot be a float"") int(timeout) except ValueError: raise ValueError( ""'{}' is not a valid timeout, must be an integer"".format(timeout) ) cmd.extend([""--timeout"", timeout]) if find_links: if isinstance(find_links, str): find_links = [l.strip() for l in find_links.split("","")] for link in find_links: if not ( salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link) ): raise CommandExecutionError( ""'{}' is not a valid URL or path"".format(link) ) cmd.extend([""--find-links"", link]) if no_index and (index_url or extra_index_url): raise CommandExecutionError( ""'no_index' and ('index_url' or 'extra_index_url') are mutually exclusive."" ) if index_url: if not salt.utils.url.validate(index_url, VALID_PROTOS): raise CommandExecutionError(""'{}' is not a valid URL"".format(index_url)) cmd.extend([""--index-url"", index_url]) if extra_index_url: if not salt.utils.url.validate(extra_index_url, VALID_PROTOS): raise CommandExecutionError( ""'{}' is not a valid URL"".format(extra_index_url) ) cmd.extend([""--extra-index-url"", extra_index_url]) if no_index: cmd.append(""--no-index"") if mirrors: # https://github.com/pypa/pip/pull/2641/files#diff-3ef137fb9ffdd400f117a565cd94c188L216 if salt.utils.versions.compare(ver1=cur_version, oper="">="", ver2=""7.0.0""): raise CommandExecutionError( ""pip >= 7.0.0 does not support mirror argument:"" "" use index_url and/or extra_index_url instead"" ) if isinstance(mirrors, str): mirrors = [m.strip() for m in mirrors.split("","")] cmd.append(""--use-mirrors"") for mirror in mirrors: if not mirror.startswith(""http://""): raise CommandExecutionError(""'{}' is not a valid URL"".format(mirror)) cmd.extend([""--mirrors"", mirror]) if disable_version_check: cmd.extend([""--disable-pip-version-check""]) if build: cmd.extend([""--build"", build]) if target: cmd.extend([""--target"", target]) if download: cmd.extend([""--download"", download]) if download_cache or cache_dir: cmd.extend( [ ""--cache-dir"" if salt.utils.versions.compare(ver1=cur_version, oper="">="", ver2=""6.0"") else ""--download-cache"", download_cache or cache_dir, ] ) if source: cmd.extend([""--source"", source]) if upgrade: cmd.append(""--upgrade"") if force_reinstall: cmd.append(""--force-reinstall"") if ignore_installed: cmd.append(""--ignore-installed"") if exists_action: if exists_action.lower() not in (""s"", ""i"", ""w"", ""b""): raise CommandExecutionError( ""The exists_action pip option only supports the values "" ""s, i, w, and b. 
'{}' is not valid."".format(exists_action) ) cmd.extend([""--exists-action"", exists_action]) if no_deps: cmd.append(""--no-deps"") if no_install: cmd.append(""--no-install"") if no_download: cmd.append(""--no-download"") if no_cache_dir: cmd.append(""--no-cache-dir"") if pre_releases: # Check the locally installed pip version pip_version = cur_version # From pip v1.4 the --pre flag is available if salt.utils.versions.compare(ver1=pip_version, oper="">="", ver2=""1.4""): cmd.append(""--pre"") if cert: cmd.extend([""--cert"", cert]) if global_options: if isinstance(global_options, str): global_options = [go.strip() for go in global_options.split("","")] for opt in global_options: cmd.extend([""--global-option"", opt]) if install_options: if isinstance(install_options, str): install_options = [io.strip() for io in install_options.split("","")] for opt in install_options: cmd.extend([""--install-option"", opt]) if pkgs: if not isinstance(pkgs, list): try: pkgs = [p.strip() for p in pkgs.split("","")] except AttributeError: pkgs = [p.strip() for p in str(pkgs).split("","")] pkgs = salt.utils.data.stringify(salt.utils.data.decode_list(pkgs)) # It's possible we replaced version-range commas with semicolons so # they would survive the previous line (in the pip.installed state). # Put the commas back in while making sure the names are contained in # quotes, this allows for proper version spec passing salt>=0.17.0 cmd.extend([p.replace("";"", "","") for p in pkgs]) elif not any([requirements, editable]): # Starting with pip 10.0.0, if no packages are specified in the # command, it returns a retcode 1. So instead of running the command, # just return the output without running pip. return {""retcode"": 0, ""stdout"": ""No packages to install.""} if editable: egg_match = re.compile(r""(?:#|#.*?&)egg=([^&]*)"") if isinstance(editable, str): editable = [e.strip() for e in editable.split("","")] for entry in editable: # Is the editable local? 
if not (entry == ""."" or entry.startswith((""file://"", ""/""))): match = egg_match.search(entry) if not match or not match.group(1): # Missing #egg=theEggName raise CommandExecutionError( ""You must specify an egg for this editable"" ) cmd.extend([""--editable"", entry]) if allow_all_external: cmd.append(""--allow-all-external"") if allow_external: if isinstance(allow_external, str): allow_external = [p.strip() for p in allow_external.split("","")] for pkg in allow_external: cmd.extend([""--allow-external"", pkg]) if allow_unverified: if isinstance(allow_unverified, str): allow_unverified = [p.strip() for p in allow_unverified.split("","")] for pkg in allow_unverified: cmd.extend([""--allow-unverified"", pkg]) if process_dependency_links: cmd.append(""--process-dependency-links"") if trusted_host: cmd.extend([""--trusted-host"", trusted_host]) if extra_args: # These are arguments from the latest version of pip that # have not yet been implemented in salt for arg in extra_args: # It is a keyword argument if isinstance(arg, dict): # There will only ever be one item in this dictionary key, val = arg.popitem() # Don't allow any recursion into keyword arg definitions # Don't allow multiple definitions of a keyword if isinstance(val, (dict, list)): raise TypeError(""Too many levels in: {}"".format(key)) # This is a a normal one-to-one keyword argument cmd.extend([key, val]) # It is a positional argument, append it to the list else: cmd.append(arg) cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user) if kwargs: cmd_kwargs.update(kwargs) if env_vars: cmd_kwargs.setdefault(""env"", {}).update(_format_env_vars(env_vars)) try: if cwd: cmd_kwargs[""cwd""] = cwd if bin_env and os.path.isdir(bin_env): cmd_kwargs.setdefault(""env"", {})[""VIRTUAL_ENV""] = bin_env logger.debug( ""TRY BLOCK: end of pip.install -- cmd: %s, cmd_kwargs: %s"", cmd, cmd_kwargs ) return __salt__[""cmd.run_all""](cmd, python_shell=False, **cmd_kwargs) finally: _clear_context(bin_env) for tempdir in [cr for cr in cleanup_requirements if cr is not None]: if os.path.isdir(tempdir): shutil.rmtree(tempdir) ","def install( pkgs=None, # pylint: disable=R0912,R0913,R0914 requirements=None, bin_env=None, use_wheel=False, no_use_wheel=False, log=None, proxy=None, timeout=None, editable=None, find_links=None, index_url=None, extra_index_url=None, no_index=False, mirrors=None, build=None, target=None, download=None, download_cache=None, source=None, upgrade=False, force_reinstall=False, ignore_installed=False, exists_action=None, no_deps=False, no_install=False, no_download=False, global_options=None, install_options=None, user=None, cwd=None, pre_releases=False, cert=None, allow_all_external=False, allow_external=None, allow_unverified=None, process_dependency_links=False, saltenv=""base"", env_vars=None, use_vt=False, trusted_host=None, no_cache_dir=False, extra_args=None, cache_dir=None, no_binary=None, disable_version_check=False, freeze_command=None, **kwargs ): """""" Install packages with pip Install packages individually or from a pip requirements file. Install packages globally or to a virtualenv. pkgs Comma separated list of packages to install requirements Path to requirements bin_env Path to pip (or to a virtualenv). This can be used to specify the path to the pip to use when more than one Python release is installed (e.g. ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is specified, it is assumed to be a virtualenv. .. 
note:: For Windows, if the pip module is being used to upgrade the pip package, bin_env should be the path to the virtualenv or to the python binary that should be used. The pip command is unable to upgrade itself in Windows. use_wheel Prefer wheel archives (requires pip>=1.4) no_use_wheel Force to not use wheel archives (requires pip>=1.4,<10.0.0) no_binary Force to not use binary packages (requires pip >= 7.0.0) Accepts either :all: to disable all binary packages, :none: to empty the set, or one or more package names with commas between them log Log file where a complete (maximum verbosity) record will be kept proxy Specify a proxy in the form ``user:passwd@proxy.server:port``. Note that the ``user:password@`` is optional and required only if you are behind an authenticated proxy. If you provide ``user@proxy.server:port`` then you will be prompted for a password. .. note:: If the Minion has a globaly configured proxy - it will be used even if no proxy was set here. To explicitly disable proxy for pip you should pass ``False`` as a value. timeout Set the socket timeout (default 15 seconds) editable install something editable (e.g. ``git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed``) find_links URL to search for packages index_url Base URL of Python Package Index extra_index_url Extra URLs of package indexes to use in addition to ``index_url`` no_index Ignore package index mirrors Specific mirror URL(s) to query (automatically adds --use-mirrors) .. warning:: This option has been deprecated and removed in pip version 7.0.0. Please use ``index_url`` and/or ``extra_index_url`` instead. build Unpack packages into ``build`` dir target Install packages into ``target`` dir download Download packages into ``download`` instead of installing them download_cache | cache_dir Cache downloaded packages in ``download_cache`` or ``cache_dir`` dir source Check out ``editable`` packages into ``source`` dir upgrade Upgrade all packages to the newest available version force_reinstall When upgrading, reinstall all packages even if they are already up-to-date. ignore_installed Ignore the installed packages (reinstalling instead) exists_action Default action when a path already exists: (s)witch, (i)gnore, (w)ipe, (b)ackup no_deps Ignore package dependencies no_install Download and unpack all packages, but don't actually install them no_download Don't download any packages, just install the ones already downloaded (completes an install run with ``--no-install``) install_options Extra arguments to be supplied to the setup.py install command (e.g. like ``--install-option='--install-scripts=/usr/local/bin'``). Use multiple --install-option options to pass multiple options to setup.py install. If you are using an option with a directory path, be sure to use absolute path. global_options Extra global options to be supplied to the setup.py call before the install command. user The user under which to run pip cwd Directory from which to run pip pre_releases Include pre-releases in the available versions cert Provide a path to an alternate CA bundle allow_all_external Allow the installation of all externally hosted files allow_external Allow the installation of externally hosted files (comma separated list) allow_unverified Allow the installation of insecure and unverifiable files (comma separated list) process_dependency_links Enable the processing of dependency links env_vars Set environment variables that some builds will depend on. 
For example, a Python C-module may have a Makefile that needs INCLUDE_PATH set to pick up a header file while compiling. This must be in the form of a dictionary or a mapping. Example: .. code-block:: bash salt '*' pip.install django_app env_vars=""{'CUSTOM_PATH': '/opt/django_app'}"" trusted_host Mark this host as trusted, even though it does not have valid or any HTTPS. use_vt Use VT terminal emulation (see output while installing) no_cache_dir Disable the cache. extra_args pip keyword and positional arguments not yet implemented in salt .. code-block:: yaml salt '*' pip.install pandas extra_args=""[{'--latest-pip-kwarg':'param'}, '--latest-pip-arg']"" .. warning:: If unsupported options are passed here that are not supported in a minion's version of pip, a `No such option error` will be thrown. freeze_command (Optional) Command used to list installed python packages. In Conda environment this should be something like `/opt/conda/bin/pip3 list --export` instead of default `pip freeze` Will be translated into the following pip command: .. code-block:: bash pip install pandas --latest-pip-kwarg param --latest-pip-arg disable_version_check Pip may periodically check PyPI to determine whether a new version of pip is available to download. Passing True for this option disables that check. CLI Example: .. code-block:: bash salt '*' pip.install , salt '*' pip.install requirements=/path/to/requirements.txt salt '*' pip.install bin_env=/path/to/virtualenv salt '*' pip.install bin_env=/path/to/pip_bin Complicated CLI Example: .. code-block:: bash salt '*' pip.install markdown,django \ editable=git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed upgrade=True no_deps=True """""" cwd = _pip_bin_env(cwd, bin_env) cmd = _get_pip_bin(bin_env) cmd.append(""install"") cleanup_requirements, error = _process_requirements( requirements=requirements, cmd=cmd, cwd=cwd, saltenv=saltenv, user=user ) if error: return error cur_version = version(bin_env, cwd, user=user) if use_wheel: min_version = ""1.4"" max_version = ""9.0.3"" too_low = salt.utils.versions.compare( ver1=cur_version, oper=""<"", ver2=min_version ) too_high = salt.utils.versions.compare( ver1=cur_version, oper="">"", ver2=max_version ) if too_low or too_high: logger.error( ""The --use-wheel option is only supported in pip between %s and "" ""%s. The version of pip detected is %s. This option "" ""will be ignored."", min_version, max_version, cur_version, ) else: cmd.append(""--use-wheel"") if no_use_wheel: min_version = ""1.4"" max_version = ""9.0.3"" too_low = salt.utils.versions.compare( ver1=cur_version, oper=""<"", ver2=min_version ) too_high = salt.utils.versions.compare( ver1=cur_version, oper="">"", ver2=max_version ) if too_low or too_high: logger.error( ""The --no-use-wheel option is only supported in pip between %s and "" ""%s. The version of pip detected is %s. This option "" ""will be ignored."", min_version, max_version, cur_version, ) else: cmd.append(""--no-use-wheel"") if no_binary: min_version = ""7.0.0"" too_low = salt.utils.versions.compare( ver1=cur_version, oper=""<"", ver2=min_version ) if too_low: logger.error( ""The --no-binary option is only supported in pip %s and "" ""newer. The version of pip detected is %s. This option "" ""will be ignored."", min_version, cur_version, ) else: if isinstance(no_binary, list): no_binary = "","".join(no_binary) cmd.extend([""--no-binary"", no_binary]) if log: if os.path.isdir(log): raise OSError(""'{}' is a directory. 
Use --log path_to_file"".format(log)) elif not os.access(log, os.W_OK): raise OSError(""'{}' is not writeable"".format(log)) cmd.extend([""--log"", log]) config = __opts__ if proxy: cmd.extend([""--proxy"", proxy]) # If proxy arg is set to False we won't use the global proxy even if it's set. elif proxy is not False and config.get(""proxy_host"") and config.get(""proxy_port""): if config.get(""proxy_username"") and config.get(""proxy_password""): http_proxy_url = ""http://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"".format( **config ) else: http_proxy_url = ""http://{proxy_host}:{proxy_port}"".format(**config) cmd.extend([""--proxy"", http_proxy_url]) if timeout: try: if isinstance(timeout, float): # Catch floating point input, exception will be caught in # exception class below. raise ValueError(""Timeout cannot be a float"") int(timeout) except ValueError: raise ValueError( ""'{}' is not a valid timeout, must be an integer"".format(timeout) ) cmd.extend([""--timeout"", timeout]) if find_links: if isinstance(find_links, str): find_links = [l.strip() for l in find_links.split("","")] for link in find_links: if not ( salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link) ): raise CommandExecutionError( ""'{}' is not a valid URL or path"".format(link) ) cmd.extend([""--find-links"", link]) if no_index and (index_url or extra_index_url): raise CommandExecutionError( ""'no_index' and ('index_url' or 'extra_index_url') are mutually exclusive."" ) if index_url: if not salt.utils.url.validate(index_url, VALID_PROTOS): raise CommandExecutionError(""'{}' is not a valid URL"".format(index_url)) cmd.extend([""--index-url"", index_url]) if extra_index_url: if not salt.utils.url.validate(extra_index_url, VALID_PROTOS): raise CommandExecutionError( ""'{}' is not a valid URL"".format(extra_index_url) ) cmd.extend([""--extra-index-url"", extra_index_url]) if no_index: cmd.append(""--no-index"") if mirrors: # https://github.com/pypa/pip/pull/2641/files#diff-3ef137fb9ffdd400f117a565cd94c188L216 if salt.utils.versions.compare(ver1=cur_version, oper="">="", ver2=""7.0.0""): raise CommandExecutionError( ""pip >= 7.0.0 does not support mirror argument:"" "" use index_url and/or extra_index_url instead"" ) if isinstance(mirrors, str): mirrors = [m.strip() for m in mirrors.split("","")] cmd.append(""--use-mirrors"") for mirror in mirrors: if not mirror.startswith(""http://""): raise CommandExecutionError(""'{}' is not a valid URL"".format(mirror)) cmd.extend([""--mirrors"", mirror]) if disable_version_check: cmd.extend([""--disable-pip-version-check""]) if build: cmd.extend([""--build"", build]) if target: cmd.extend([""--target"", target]) if download: cmd.extend([""--download"", download]) if download_cache or cache_dir: cmd.extend( [ ""--cache-dir"" if salt.utils.versions.compare(ver1=cur_version, oper="">="", ver2=""6.0"") else ""--download-cache"", download_cache or cache_dir, ] ) if source: cmd.extend([""--source"", source]) if upgrade: cmd.append(""--upgrade"") if force_reinstall: cmd.append(""--force-reinstall"") if ignore_installed: cmd.append(""--ignore-installed"") if exists_action: if exists_action.lower() not in (""s"", ""i"", ""w"", ""b""): raise CommandExecutionError( ""The exists_action pip option only supports the values "" ""s, i, w, and b. 
'{}' is not valid."".format(exists_action) ) cmd.extend([""--exists-action"", exists_action]) if no_deps: cmd.append(""--no-deps"") if no_install: cmd.append(""--no-install"") if no_download: cmd.append(""--no-download"") if no_cache_dir: cmd.append(""--no-cache-dir"") if pre_releases: # Check the locally installed pip version pip_version = cur_version # From pip v1.4 the --pre flag is available if salt.utils.versions.compare(ver1=pip_version, oper="">="", ver2=""1.4""): cmd.append(""--pre"") if cert: cmd.extend([""--cert"", cert]) if global_options: if isinstance(global_options, str): global_options = [go.strip() for go in global_options.split("","")] for opt in global_options: cmd.extend([""--global-option"", opt]) if install_options: if isinstance(install_options, str): install_options = [io.strip() for io in install_options.split("","")] for opt in install_options: cmd.extend([""--install-option"", opt]) if pkgs: if not isinstance(pkgs, list): try: pkgs = [p.strip() for p in pkgs.split("","")] except AttributeError: pkgs = [p.strip() for p in str(pkgs).split("","")] pkgs = salt.utils.data.stringify(salt.utils.data.decode_list(pkgs)) # It's possible we replaced version-range commas with semicolons so # they would survive the previous line (in the pip.installed state). # Put the commas back in while making sure the names are contained in # quotes, this allows for proper version spec passing salt>=0.17.0 cmd.extend([p.replace("";"", "","") for p in pkgs]) elif not any([requirements, editable]): # Starting with pip 10.0.0, if no packages are specified in the # command, it returns a retcode 1. So instead of running the command, # just return the output without running pip. return {""retcode"": 0, ""stdout"": ""No packages to install.""} if editable: egg_match = re.compile(r""(?:#|#.*?&)egg=([^&]*)"") if isinstance(editable, str): editable = [e.strip() for e in editable.split("","")] for entry in editable: # Is the editable local? 
if not (entry == ""."" or entry.startswith((""file://"", ""/""))): match = egg_match.search(entry) if not match or not match.group(1): # Missing #egg=theEggName raise CommandExecutionError( ""You must specify an egg for this editable"" ) cmd.extend([""--editable"", entry]) if allow_all_external: cmd.append(""--allow-all-external"") if allow_external: if isinstance(allow_external, str): allow_external = [p.strip() for p in allow_external.split("","")] for pkg in allow_external: cmd.extend([""--allow-external"", pkg]) if allow_unverified: if isinstance(allow_unverified, str): allow_unverified = [p.strip() for p in allow_unverified.split("","")] for pkg in allow_unverified: cmd.extend([""--allow-unverified"", pkg]) if process_dependency_links: cmd.append(""--process-dependency-links"") if trusted_host: cmd.extend([""--trusted-host"", trusted_host]) if extra_args: # These are arguments from the latest version of pip that # have not yet been implemented in salt for arg in extra_args: # It is a keyword argument if isinstance(arg, dict): # There will only ever be one item in this dictionary key, val = arg.popitem() # Don't allow any recursion into keyword arg definitions # Don't allow multiple definitions of a keyword if isinstance(val, (dict, list)): raise TypeError(""Too many levels in: {}"".format(key)) # This is a a normal one-to-one keyword argument cmd.extend([key, val]) # It is a positional argument, append it to the list else: cmd.append(arg) cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user) if kwargs: cmd_kwargs.update(kwargs) if env_vars: cmd_kwargs.setdefault(""env"", {}).update(_format_env_vars(env_vars)) try: if cwd: cmd_kwargs[""cwd""] = cwd if bin_env and os.path.isdir(bin_env): cmd_kwargs.setdefault(""env"", {})[""VIRTUAL_ENV""] = bin_env logger.debug( ""TRY BLOCK: end of pip.install -- cmd: %s, cmd_kwargs: %s"", cmd, cmd_kwargs ) return __salt__[""cmd.run_all""](cmd, python_shell=False, **cmd_kwargs) finally: _clear_context(bin_env) for tempdir in [cr for cr in cleanup_requirements if cr is not None]: if os.path.isdir(tempdir): shutil.rmtree(tempdir) " 20333,"def verify_local_bear(bear, valid_files, invalid_files, filename=None, settings={}, aspects=None, force_linebreaks=True, create_tempfile=True, timeout=None, tempfile_kwargs={}, base_directory=None): """""" Generates a test for a local bear by checking the given valid and invalid file contents. Simply use it on your module level like: YourTestName = verify_local_bear(YourBear, (['valid line'],), (['invalid line'],)) :param bear: The Bear class to test. :param valid_files: An iterable of files as a string list that won't yield results. :param invalid_files: An iterable of files as a string list that must yield results. :param filename: The filename to use for valid and invalid files. :param settings: A dictionary of keys and values (both string) from which settings will be created that will be made available for the tested bear. :param aspects: A list of aspect objects along with the name and value of their respective tastes. :param force_linebreaks: Whether to append newlines at each line if needed. (Bears expect a \\n for every line) :param create_tempfile: Whether to save lines in tempfile if needed. :param timeout: Unused. Use pytest-timeout or similar. :param tempfile_kwargs: Kwargs passed to tempfile.mkstemp() if tempfile needs to be created. :param base_directory: Path to base directory. Change to base_directory while running the bear. :return: A unittest.TestCase object. 
"""""" if timeout: logging.warning('timeout is ignored as the timeout set in the repo ' 'configuration will be sufficient. Use pytest-timeout ' 'or similar to achieve same result.') @generate_skip_decorator(bear) class LocalBearTest(LocalBearTestHelper): def setUp(self): self.section = Section('name') self.uut = bear(self.section, queue.Queue()) for name, value in settings.items(): self.section.append(Setting(name, value)) if aspects: self.section.aspects = aspects if base_directory: change_directory(base_directory) def test_valid_files(self): self.assertIsInstance(valid_files, (list, tuple)) for file in valid_files: self.check_validity(self.uut, file.splitlines(keepends=True), filename, valid=True, force_linebreaks=force_linebreaks, create_tempfile=create_tempfile, tempfile_kwargs=tempfile_kwargs) def test_invalid_files(self): self.assertIsInstance(invalid_files, (list, tuple)) for file in invalid_files: self.check_validity(self.uut, file.splitlines(keepends=True), filename, valid=False, force_linebreaks=force_linebreaks, create_tempfile=create_tempfile, tempfile_kwargs=tempfile_kwargs) return LocalBearTest ","def verify_local_bear(bear, valid_files, invalid_files, filename=None, settings={}, aspects=None, force_linebreaks=True, create_tempfile=True, timeout=None, tempfile_kwargs={}, base_directory=None): """""" Generates a test for a local bear by checking the given valid and invalid file contents. Simply use it on your module level like: YourTestName = verify_local_bear(YourBear, (['valid line'],), (['invalid line'],)) :param bear: The Bear class to test. :param valid_files: An iterable of files as a string list that won't yield results. :param invalid_files: An iterable of files as a string list that must yield results. :param filename: The filename to use for valid and invalid files. :param settings: A dictionary of keys and values (both string) from which settings will be created that will be made available for the tested bear. :param aspects: A list of aspect objects along with the name and value of their respective tastes. :param force_linebreaks: Whether to append newlines at each line if needed. (Bears expect a \\n for every line) :param create_tempfile: Whether to save lines in tempfile if needed. :param timeout: Unused. Use pytest-timeout or similar. :param tempfile_kwargs: Kwargs passed to tempfile.mkstemp() if tempfile needs to be created. :param base_directory: Path to the base directory. Changes the CWD to base_directory while running the bear. :return: A unittest.TestCase object. """""" if timeout: logging.warning('timeout is ignored as the timeout set in the repo ' 'configuration will be sufficient. 
Use pytest-timeout ' 'or similar to achieve same result.') @generate_skip_decorator(bear) class LocalBearTest(LocalBearTestHelper): def setUp(self): self.section = Section('name') self.uut = bear(self.section, queue.Queue()) for name, value in settings.items(): self.section.append(Setting(name, value)) if aspects: self.section.aspects = aspects if base_directory: change_directory(base_directory) def test_valid_files(self): self.assertIsInstance(valid_files, (list, tuple)) for file in valid_files: self.check_validity(self.uut, file.splitlines(keepends=True), filename, valid=True, force_linebreaks=force_linebreaks, create_tempfile=create_tempfile, tempfile_kwargs=tempfile_kwargs) def test_invalid_files(self): self.assertIsInstance(invalid_files, (list, tuple)) for file in invalid_files: self.check_validity(self.uut, file.splitlines(keepends=True), filename, valid=False, force_linebreaks=force_linebreaks, create_tempfile=create_tempfile, tempfile_kwargs=tempfile_kwargs) return LocalBearTest " 12587,"def load_plugins(build_configuration, plugins, working_set): """"""Load named plugins from the current working_set into the supplied build_configuration ""Loading"" a plugin here refers to calling registration methods -- it is assumed each plugin is already on the path and an error will be thrown if it is not. Plugins should define their entrypoints in the `pantsbuild.plugin` group when configuring their distribution. Like source backends, the `build_file_aliases`, `global_subsystems` and `register_goals` methods are called if those entry points are defined. * Plugins are loaded in the order they are provided. * This is important as loading can add, remove or replace existing tasks installed by other plugins. If a plugin needs to assert that another plugin is registered before it, it can define an entrypoint ""load_after"" which can return a list of plugins which must have been loaded before it can be loaded. This does not change the order or what plugins are loaded in any way -- it is purely an assertion to guard against misconfiguration. :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases). :param list plugins: A list of plugin names optionally with versions, in requirement format. eg ['widgetpublish', 'widgetgen==1.2']. :param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from. 
"""""" loaded = {} for plugin in plugins: print(f""PLUGIN: {plugin}"") req = Requirement.parse(plugin) dist = working_set.find(req) if not dist: raise PluginNotFound(f'Could not find plugin: {req}') entries = dist.get_entry_map().get('pantsbuild.plugin', {}) if 'load_after' in entries: deps = entries['load_after'].load()() for dep_name in deps: dep = Requirement.parse(dep_name) if dep.key not in loaded: raise PluginLoadOrderError(f'Plugin {plugin} must be loaded after {dep}') if 'build_file_aliases' in entries: aliases = entries['build_file_aliases'].load()() build_configuration.register_aliases(aliases) if 'register_goals' in entries: entries['register_goals'].load()() if 'global_subsystems' in entries: subsystems = entries['global_subsystems'].load()() build_configuration.register_optionables(subsystems) if 'rules' in entries: rules = entries['rules'].load()() build_configuration.register_rules(rules) loaded[dist.as_requirement().key] = dist ","def load_plugins(build_configuration, plugins, working_set): """"""Load named plugins from the current working_set into the supplied build_configuration ""Loading"" a plugin here refers to calling registration methods -- it is assumed each plugin is already on the path and an error will be thrown if it is not. Plugins should define their entrypoints in the `pantsbuild.plugin` group when configuring their distribution. Like source backends, the `build_file_aliases`, `global_subsystems` and `register_goals` methods are called if those entry points are defined. * Plugins are loaded in the order they are provided. * This is important as loading can add, remove or replace existing tasks installed by other plugins. If a plugin needs to assert that another plugin is registered before it, it can define an entrypoint ""load_after"" which can return a list of plugins which must have been loaded before it can be loaded. This does not change the order or what plugins are loaded in any way -- it is purely an assertion to guard against misconfiguration. :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases). :param list plugins: A list of plugin names optionally with versions, in requirement format. eg ['widgetpublish', 'widgetgen==1.2']. :param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from. 
"""""" loaded = {} for plugin in plugins: logger.debug(f""loaded plugin requirement: {plugin}"") req = Requirement.parse(plugin) dist = working_set.find(req) if not dist: raise PluginNotFound(f'Could not find plugin: {req}') entries = dist.get_entry_map().get('pantsbuild.plugin', {}) if 'load_after' in entries: deps = entries['load_after'].load()() for dep_name in deps: dep = Requirement.parse(dep_name) if dep.key not in loaded: raise PluginLoadOrderError(f'Plugin {plugin} must be loaded after {dep}') if 'build_file_aliases' in entries: aliases = entries['build_file_aliases'].load()() build_configuration.register_aliases(aliases) if 'register_goals' in entries: entries['register_goals'].load()() if 'global_subsystems' in entries: subsystems = entries['global_subsystems'].load()() build_configuration.register_optionables(subsystems) if 'rules' in entries: rules = entries['rules'].load()() build_configuration.register_rules(rules) loaded[dist.as_requirement().key] = dist " 6610,"def create_material_request(material_requests): """""" Create indent on reaching reorder level """""" mr_list = [] exceptions_list = [] def _log_exception(): if frappe.local.message_log: exceptions_list.extend(frappe.local.message_log) frappe.local.message_log = [] else: exceptions_list.append(frappe.get_traceback()) frappe.log_error(frappe.get_traceback()) for request_type in material_requests: for company in material_requests[request_type]: try: items = material_requests[request_type][company] if not items: continue mr = frappe.new_doc(""Material Request"") mr.update({ ""company"": company, ""transaction_date"": nowdate(), ""material_request_type"": ""Material Transfer"" if request_type==""Transfer"" else request_type }) for d in items: d = frappe._dict(d) item = frappe.get_doc(""Item"", d.item_code) uom = item.stock_uom conversion_factor = 1.0 if request_type == 'Purchase': uom = item.purchase_uom or item.stock_uom if uom != item.stock_uom: conversion_factor = frappe.db.get_value(""UOM Conversion Detail"", {'parent': item.name, 'uom': uom}, 'conversion_factor') or 1.0 must_be_whole_number = frappe.get_value(""UOM"", uom, ""must_be_whole_number"") qty = d.reorder_qty / conversion_factor if must_be_whole_number: qty = ceil(qty) mr.append(""items"", { ""doctype"": ""Material Request Item"", ""item_code"": d.item_code, ""schedule_date"": add_days(nowdate(),cint(item.lead_time_days)), ""qty"": qty, ""uom"": uom, ""stock_uom"": item.stock_uom, ""warehouse"": d.warehouse, ""item_name"": item.item_name, ""description"": item.description, ""item_group"": item.item_group, ""brand"": item.brand, }) schedule_dates = [d.schedule_date for d in mr.items] mr.schedule_date = max(schedule_dates or [nowdate()]) mr.flags.ignore_mandatory = True mr.insert() mr.submit() mr_list.append(mr) except Exception: _log_exception() if mr_list: if getattr(frappe.local, ""reorder_email_notify"", None) is None: frappe.local.reorder_email_notify = cint(frappe.db.get_value('Stock Settings', None, 'reorder_email_notify')) if(frappe.local.reorder_email_notify): send_email_notification(mr_list) if exceptions_list: notify_errors(exceptions_list) return mr_list ","def create_material_request(material_requests): """""" Create indent on reaching reorder level """""" mr_list = [] exceptions_list = [] def _log_exception(): if frappe.local.message_log: exceptions_list.extend(frappe.local.message_log) frappe.local.message_log = [] else: exceptions_list.append(frappe.get_traceback()) frappe.log_error(frappe.get_traceback()) for request_type in 
material_requests: for company in material_requests[request_type]: try: items = material_requests[request_type][company] if not items: continue mr = frappe.new_doc(""Material Request"") mr.update({ ""company"": company, ""transaction_date"": nowdate(), ""material_request_type"": ""Material Transfer"" if request_type==""Transfer"" else request_type }) for d in items: d = frappe._dict(d) item = frappe.get_doc(""Item"", d.item_code) uom = item.stock_uom conversion_factor = 1.0 if request_type == 'Purchase': uom = item.purchase_uom or item.stock_uom if uom != item.stock_uom: conversion_factor = frappe.db.get_value(""UOM Conversion Detail"", {'parent': item.name, 'uom': uom}, 'conversion_factor') or 1.0 must_be_whole_number = frappe.db.get_value(""UOM"", uom, ""must_be_whole_number"", cache=True) qty = d.reorder_qty / conversion_factor if must_be_whole_number: qty = ceil(qty) mr.append(""items"", { ""doctype"": ""Material Request Item"", ""item_code"": d.item_code, ""schedule_date"": add_days(nowdate(),cint(item.lead_time_days)), ""qty"": qty, ""uom"": uom, ""stock_uom"": item.stock_uom, ""warehouse"": d.warehouse, ""item_name"": item.item_name, ""description"": item.description, ""item_group"": item.item_group, ""brand"": item.brand, }) schedule_dates = [d.schedule_date for d in mr.items] mr.schedule_date = max(schedule_dates or [nowdate()]) mr.flags.ignore_mandatory = True mr.insert() mr.submit() mr_list.append(mr) except Exception: _log_exception() if mr_list: if getattr(frappe.local, ""reorder_email_notify"", None) is None: frappe.local.reorder_email_notify = cint(frappe.db.get_value('Stock Settings', None, 'reorder_email_notify')) if(frappe.local.reorder_email_notify): send_email_notification(mr_list) if exceptions_list: notify_errors(exceptions_list) return mr_list " 54509,"def run(args: argparse.Namespace) -> None: kurobako_cmd = os.path.join(args.path_to_kurobako, ""kurobako"") subprocess.run(f""{kurobako_cmd} --version"", shell=True) if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)): raise ValueError(f""Data directory {args.data_dir} cannot be found."") os.makedirs(args.out_dir, exist_ok=True) study_json_fn = os.path.join(args.out_dir, ""studies.json"") subprocess.check_call(f""echo >| {study_json_fn}"", shell=True) solvers_filename = os.path.join(args.out_dir, ""solvers.json"") subprocess.check_call(f""echo >| {solvers_filename}"", shell=True) problems_filename = os.path.join(args.out_dir, ""problems.json"") subprocess.check_call(f""echo >| {problems_filename}"", shell=True) # Create ZDT problems cmd = f""{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"" subprocess.run(cmd, shell=True) # Create NAS bench problem(C) (for Multi-Objective Settings). dataset = os.path.join(args.data_dir, ""nasbench_full.bin"") cmd = ( f'{kurobako_cmd} problem nasbench ""{dataset}""' f""--encoding C --metrics accuracy params | tee -a {problems_filename}"" ) subprocess.run(cmd, shell=True) # Create solvers. sampler_list = args.sampler_list.split() sampler_kwargs_list = args.sampler_kwargs_list.split() if len(sampler_list) != len(sampler_kwargs_list): raise ValueError( ""The number of samplers does not match the given keyword arguments. 
\n"" f""sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."" ) for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list): name = f""{args.name_prefix}_{sampler}"" python_command = f""mo_runner.py {sampler} {sampler_kwargs}"" cmd = ( f""{kurobako_cmd} solver --name {name} command python {python_command}"" f""| tee -a {solvers_filename}"" ) subprocess.run(cmd, shell=True) # Create study. cmd = ( f""{kurobako_cmd} studies --budget 1000 "" f""--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "" f""--repeats {args.n_runs} --seed {args.seed} "" f""> {study_json_fn}"" ) subprocess.run(cmd, shell=True) result_filename = os.path.join(args.out_dir, ""results.json"") cmd = ( f""cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "" f""> {result_filename}"" ) subprocess.run(cmd, shell=True) # Report report_filename = os.path.join(args.out_dir, ""report.md"") cmd = f""cat {result_filename} | {kurobako_cmd} report > {report_filename}"" subprocess.run(cmd, shell=True) # Plot pareto-front. problem_names = [""NASBench"", ""ZDT1"", ""ZDT2"", ""ZDT3"", ""ZDT4"", ""ZDT5"", ""ZDT6""] for problem_name in problem_names: cmd = ( f""cat {result_filename} | grep {problem_name} | "" f""{kurobako_cmd} plot pareto-front -o {args.out_dir}"" ) subprocess.run(cmd, shell=True) ","def run(args: argparse.Namespace) -> None: kurobako_cmd = os.path.join(args.path_to_kurobako, ""kurobako"") subprocess.run(f""{kurobako_cmd} --version"", shell=True) if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)): raise ValueError(f""Data directory {args.data_dir} cannot be found."") os.makedirs(args.out_dir, exist_ok=True) study_json_fn = os.path.join(args.out_dir, ""studies.json"") subprocess.check_call(f""echo >| {study_json_fn}"", shell=True) solvers_filename = os.path.join(args.out_dir, ""solvers.json"") subprocess.check_call(f""echo >| {solvers_filename}"", shell=True) problems_filename = os.path.join(args.out_dir, ""problems.json"") subprocess.check_call(f""echo >| {problems_filename}"", shell=True) # Create ZDT problems cmd = f""{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"" subprocess.run(cmd, shell=True) # Create NAS bench problem(C) (for Multi-Objective Settings). dataset = os.path.join(args.data_dir, ""nasbench_full.bin"") cmd = ( f'{kurobako_cmd} problem nasbench ""{dataset}""' f""--encoding C --metrics accuracy params | tee -a {problems_filename}"" ) subprocess.run(cmd, shell=True) # Create solvers. sampler_list = args.sampler_list.split() sampler_kwargs_list = args.sampler_kwargs_list.split() if len(sampler_list) != len(sampler_kwargs_list): raise ValueError( ""The number of samplers does not match the given keyword arguments. \n"" f""sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."" ) for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list): name = f""{args.name_prefix}_{sampler}"" python_command = f""mo_runner.py {sampler} {sampler_kwargs}"" cmd = ( f""{kurobako_cmd} solver --name {name} command python {python_command}"" f""| tee -a {solvers_filename}"" ) subprocess.run(cmd, shell=True) # Create study. 
cmd = ( f""{kurobako_cmd} studies --budget 1000 "" f""--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "" f""--repeats {args.n_runs} --seed {args.seed} "" f""> {study_json_fn}"" ) subprocess.run(cmd, shell=True) result_filename = os.path.join(args.out_dir, ""results.json"") cmd = ( f""cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "" f""> {result_filename}"" ) subprocess.run(cmd, shell=True) # Report report_filename = os.path.join(args.out_dir, ""report.md"") cmd = f""cat {result_filename} | {kurobako_cmd} report > {report_filename}"" subprocess.run(cmd, shell=True) # Plot pareto-front. problem_names = [""NASBench"", ""ZDT1"", ""ZDT2"", ""ZDT3"", ""ZDT4"", ""ZDT5"", ""ZDT6""] xmins = [0, 0, 0, 0, 0, 8, 0.2] xmaxs = [25000000, 1, 1, 1, 1, 24, 1] ymins = [0, 1, 2, 0, 20, 1, 5] ymaxs = [0.2, 7, 7, 7, 250, 6, 10] for problem_name, xmin, xmax, ymin, ymax in zip(problem_names, xmins, xmaxs, ymins, ymaxs): cmd = ( f""cat {result_filename} | grep {problem_name} | "" f""{kurobako_cmd} plot pareto-front -o {args.out_dir}"" ) subprocess.run(cmd, shell=True) " 31608,"def list_default_policy_settings_command(client: Client) -> CommandResults: """""" Reset a certain default policy setting. Args: client (Client): The Trend Micro API client. Returns: CommandResults: Command results with raw response, outputs and readable outputs. """""" response = {k: v[""value""] for k, v in client.list_default_policy_settings().items()} markdown = tableToMarkdown(""The Default Policy Settings"", response, removeNull=True, headerTransform=pascalToSpace) outputs = [{""name"": k, ""value"": v} for k, v in response.items()] return CommandResults(outputs_prefix=""TrendMicro.DefaultPolicySettings"", outputs_key_field=""name"", outputs=outputs, readable_output=markdown, raw_response=response) ","def list_default_policy_settings_command(client: Client) -> CommandResults: """""" Reset a certain default policy setting. Args: client (Client): The Trend Micro API client. Returns: CommandResults: Command results with raw response, outputs and readable outputs. """""" response = {k: v.get(""value"") for k, v in client.list_default_policy_settings().items()} markdown = tableToMarkdown(""The Default Policy Settings"", response, removeNull=True, headerTransform=pascalToSpace) outputs = [{""name"": k, ""value"": v} for k, v in response.items()] return CommandResults(outputs_prefix=""TrendMicro.DefaultPolicySettings"", outputs_key_field=""name"", outputs=outputs, readable_output=markdown, raw_response=response) " 7445,"def imsave(fname, arr, **kwargs): """"""Load a tiff image to file. Parameters ---------- fname : str or file File name or file-like-object. arr : ndarray The array to write kwargs : keyword pairs, optional Additional keyword arguments to pass through (see ``tifffile``'s ``imwrite`` function). Notes ----- Provided by the tifffile library [1]_, and supports many advanced image types including multi-page and floating point. This implementation will set `photomotric='RGB'` when writing if the first or last axis of arr has shape 3 or 4. To override this, explicitly specify the photometric kwarg. This implementation will set `planarconfig='SEPARATE'` when writing if the first axis of arr has shape 3 or 4. To override this, explicitly specify the planarconfig kwarg. References ---------- .. 
[1] https://pypi.org/project/tifffile/ """""" if arr.shape[0] in [3, 4]: if 'planarconfig' not in kwargs: kwargs['planarconfig'] = 'SEPARATE' rgb = True else: rgb = arr.shape[-1] in [3, 4] if rgb and 'photometric' not in kwargs: kwargs['photometric'] = 'RGB' return tifffile_imwrite(fname, arr, **kwargs) ","def imsave(fname, arr, **kwargs): """"""Load a tiff image to file. Parameters ---------- fname : str or file File name or file-like-object. arr : ndarray The array to write kwargs : keyword pairs, optional Additional keyword arguments to pass through (see ``tifffile``'s ``imwrite`` function). Notes ----- Provided by the tifffile library [1]_, and supports many advanced image types including multi-page and floating point. This implementation will set `photomotric='RGB'` when writing if the first or last axis of arr has length 3 or 4. To override this, explicitly specify the photometric kwarg. This implementation will set `planarconfig='SEPARATE'` when writing if the first axis of arr has shape 3 or 4. To override this, explicitly specify the planarconfig kwarg. References ---------- .. [1] https://pypi.org/project/tifffile/ """""" if arr.shape[0] in [3, 4]: if 'planarconfig' not in kwargs: kwargs['planarconfig'] = 'SEPARATE' rgb = True else: rgb = arr.shape[-1] in [3, 4] if rgb and 'photometric' not in kwargs: kwargs['photometric'] = 'RGB' return tifffile_imwrite(fname, arr, **kwargs) " 53367,"def stripped_lines( lines, ignore_comments, ignore_docstrings, ignore_imports, ignore_signatures ): """"""return lines with leading/trailing whitespace and any ignored code features removed """""" if ignore_imports or ignore_signatures: tree = astroid.parse("""".join(lines)) if ignore_imports: node_is_import_by_lineno = ( (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom))) for node in tree.body ) line_begins_import = { lineno: all(is_import for _, is_import in node_is_import_group) for lineno, node_is_import_group in groupby( node_is_import_by_lineno, key=lambda x: x[0] ) } current_line_is_import = False if ignore_signatures: functions = filter( lambda node: isinstance( node, (astroid.FunctionDef, astroid.AsyncFunctionDef) ), tree.body, ) signature_lines = set( chain(*(range(func.fromlineno, func.body[0].lineno) for func in functions)) ) strippedlines = [] docstring = None for lineno, line in enumerate(lines, start=1): line = line.strip() if ignore_docstrings: if not docstring: if line.startswith('""""""') or line.startswith(""'''""): docstring = line[:3] line = line[3:] elif line.startswith('r""""""') or line.startswith(""r'''""): docstring = line[1:4] line = line[4:] if docstring: if line.endswith(docstring): docstring = None line = """" if ignore_imports: current_line_is_import = line_begins_import.get( lineno, current_line_is_import ) if current_line_is_import: line = """" if ignore_comments: line = line.split(""#"", 1)[0].strip() if ignore_signatures and lineno in signature_lines: line = """" strippedlines.append(line) return strippedlines ","def stripped_lines( lines, ignore_comments, ignore_docstrings, ignore_imports, ignore_signatures ): """"""return lines with leading/trailing whitespace and any ignored code features removed """""" if ignore_imports or ignore_signatures: tree = astroid.parse("""".join(lines)) if ignore_imports: node_is_import_by_lineno = ( (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom))) for node in tree.body ) line_begins_import = { lineno: all(is_import for _, is_import in node_is_import_group) for lineno, node_is_import_group in 
groupby( node_is_import_by_lineno, key=lambda x: x[0] ) } current_line_is_import = False if ignore_signatures: functions = [ n for n in tree.body if isinstance(n, (astroid.FunctionDef, astroid.AsyncFunctionDef)) ] signature_lines = set( chain(*(range(func.fromlineno, func.body[0].lineno) for func in functions)) ) strippedlines = [] docstring = None for lineno, line in enumerate(lines, start=1): line = line.strip() if ignore_docstrings: if not docstring: if line.startswith('""""""') or line.startswith(""'''""): docstring = line[:3] line = line[3:] elif line.startswith('r""""""') or line.startswith(""r'''""): docstring = line[1:4] line = line[4:] if docstring: if line.endswith(docstring): docstring = None line = """" if ignore_imports: current_line_is_import = line_begins_import.get( lineno, current_line_is_import ) if current_line_is_import: line = """" if ignore_comments: line = line.split(""#"", 1)[0].strip() if ignore_signatures and lineno in signature_lines: line = """" strippedlines.append(line) return strippedlines " 1647,"def test_one_hot_encoder_drop_equals_if_binary(): X = [['Male', 1], ['Female', 3], ['Female', 2]] expected = np.array([[1., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 1., 0.]]) ohe = OneHotEncoder(drop='if_binary') ohe.fit(X) result = ohe.transform(X).toarray() assert_array_equal(expected, result) ","def test_one_hot_encoder_drop_equals_if_binary(): X = [['Male', 1], ['Female', 3], ['Female', 2]] expected = np.array([[1., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 1., 0.]]) ohe = OneHotEncoder(drop='if_binary') ohe.fit(X) result = ohe.transform(X).toarray() assert_allclose(expected, result) " 33495,"def process_tlog(filename): '''convert a tlog to a .m file''' print(""Processing %s"" % filename) mlog = mavutil.mavlink_connection(filename, dialect=args.dialect, zero_time_base=True) # first walk the entire file, grabbing all messages into a hash of lists, #and the first message of each type into a hash msg_types = {} msg_lists = {} types = args.types if types is not None: types = types.split(',') # note that Octave doesn't like any extra '.', '*', '-', characters in the filename (head, tail) = os.path.split(filename) basename = '.'.join(tail.split('.')[:-1]) mfilename = re.sub('[\.\-\+\*]','_', basename) + '.m' # Octave also doesn't like files that don't start with a letter if re.match('^[a-zA-z]', mfilename) is None: mfilename = 'm_' + mfilename if head is not None: mfilename = os.path.join(head, mfilename) print(""Creating %s"" % mfilename) f = open(mfilename, ""w"") type_counters = {} while True: m = mlog.recv_match(condition=args.condition) if m is None: break if types is not None and m.get_type() not in types: continue if m.get_type() == 'BAD_DATA': continue fieldnames = m._fieldnames mtype = m.get_type() if mtype in ['FMT', 'PARM']: continue if mtype not in type_counters: type_counters[mtype] = 0 f.write(""%s.columns = {'timestamp'"" % mtype) for field in fieldnames: val = getattr(m, field) if not isinstance(val, str): if type(val) is not list: f.write("",'%s'"" % field) else: for i in range(0, len(val)): f.write("",'%s%d'"" % (field, i + 1)) f.write(""};\n"") type_counters[mtype] += 1 f.write(""%s.data(%u,:) = [%f"" % (mtype, type_counters[mtype], m._timestamp)) for field in m._fieldnames: val = getattr(m, field) if isinstance(val,unicode): val = val.encode(""ascii"") if not isinstance(val, str): if type(val) is not list: f.write("",%.20g"" % val) else: for i in range(0, len(val)): f.write("",%.20g"" % val[i]) f.write(""];\n"") f.close() ","def 
process_tlog(filename): '''convert a tlog to a .m file''' print(""Processing %s"" % filename) mlog = mavutil.mavlink_connection(filename, dialect=args.dialect, zero_time_base=True) # first walk the entire file, grabbing all messages into a hash of lists, #and the first message of each type into a hash msg_types = {} msg_lists = {} types = args.types if types is not None: types = types.split(',') # note that Octave doesn't like any extra '.', '*', '-', characters in the filename (head, tail) = os.path.split(filename) basename = '.'.join(tail.split('.')[:-1]) mfilename = re.sub('[\.\-\+\*]','_', basename) + '.m' # Octave also doesn't like files that don't start with a letter if re.match('^[a-zA-z]', mfilename) is None: mfilename = 'm_' + mfilename if head is not None: mfilename = os.path.join(head, mfilename) print(""Creating %s"" % mfilename) f = open(mfilename, ""w"") type_counters = {} while True: m = mlog.recv_match(condition=args.condition) if m is None: break if types is not None and m.get_type() not in types: continue if m.get_type() == 'BAD_DATA': continue fieldnames = m._fieldnames mtype = m.get_type() if mtype in ['FMT', 'PARM']: continue if mtype not in type_counters: type_counters[mtype] = 0 f.write(""%s.columns = {'timestamp'"" % mtype) for field in fieldnames: val = getattr(m, field) if not isinstance(val, str): if type(val) is not list: f.write("",'%s'"" % field) else: for i in range(0, len(val)): f.write("",'%s%d'"" % (field, i + 1)) f.write(""};\n"") type_counters[mtype] += 1 f.write(""%s.data(%u,:) = [%f"" % (mtype, type_counters[mtype], m._timestamp)) for field in m._fieldnames: val = getattr(m, field) if isinstance(val, unicode): val = val.encode(""ascii"") if not isinstance(val, str): if type(val) is not list: f.write("",%.20g"" % val) else: for i in range(0, len(val)): f.write("",%.20g"" % val[i]) f.write(""];\n"") f.close() " 47292,"def load_tf_weights_in_tapas(model, config, tf_checkpoint_path): """""" Load tf checkpoints in a PyTorch model. This is an adaptation from load_tf_weights_in_bert - add cell selection and aggregation heads - take into account additional token type embedding layers """""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( ""Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see "" ""https://www.tensorflow.org/install/ for installation instructions."" ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f""Converting TensorFlow checkpoint from {tf_path}"") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f""Loading TF weight {name} with shape {shape}"") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split(""/"") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v # which are not required for using pretrained model if any( n in [ ""adam_v"", ""adam_m"", ""AdamWeightDecayOptimizer"", ""AdamWeightDecayOptimizer_1"", ""global_step"", ""seq_relationship"", ] for n in name ): logger.info(f""Skipping {'/'.join(name)}"") continue # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights # since these are not used for classification if isinstance(model, TapasForSequenceClassification): if any(n in [""output_bias"", ""output_weights""] for n in name): logger.info(f""Skipping {'/'.join(name)}"") continue # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls # since this model does not have MLM and NSP heads if isinstance(model, TapasModel): if any(n in [""output_bias"", ""output_weights"", ""output_bias_cls"", ""output_weights_cls""] for n in name): logger.info(f""Skipping {'/'.join(name)}"") continue # in case the model is TapasForMaskedLM, we skip the pooler if isinstance(model, TapasForMaskedLM): if any(n in [""pooler""] for n in name): logger.info(f""Skipping {'/'.join(name)}"") continue # if first scope name starts with ""bert"", change it to ""tapas"" if name[0] == ""bert"": name[0] = ""tapas"" pointer = model for m_name in name: if re.fullmatch(r""[A-Za-z]+_\d+"", m_name): scope_names = re.split(r""_(\d+)"", m_name) else: scope_names = [m_name] if scope_names[0] == ""kernel"" or scope_names[0] == ""gamma"": pointer = getattr(pointer, ""weight"") elif scope_names[0] == ""beta"": pointer = getattr(pointer, ""bias"") # cell selection heads elif scope_names[0] == ""output_bias"": if not isinstance(model, TapasForMaskedLM): pointer = getattr(pointer, ""output_bias"") else: pointer = getattr(pointer, ""bias"") elif scope_names[0] == ""output_weights"": pointer = getattr(pointer, ""output_weights"") elif scope_names[0] == ""column_output_bias"": pointer = getattr(pointer, ""column_output_bias"") elif scope_names[0] == ""column_output_weights"": pointer = getattr(pointer, ""column_output_weights"") # aggregation head elif scope_names[0] == ""output_bias_agg"": pointer = getattr(pointer, ""aggregation_classifier"") pointer = getattr(pointer, ""bias"") elif scope_names[0] == ""output_weights_agg"": pointer = getattr(pointer, ""aggregation_classifier"") pointer = getattr(pointer, ""weight"") # classification head elif scope_names[0] == ""output_bias_cls"": pointer = getattr(pointer, ""classifier"") pointer = getattr(pointer, ""bias"") elif scope_names[0] == ""output_weights_cls"": pointer = getattr(pointer, ""classifier"") pointer = getattr(pointer, ""weight"") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f""Skipping {'/'.join(name)}"") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == ""_embeddings"": pointer = getattr(pointer, ""weight"") elif 
m_name[-13:] in [f""_embeddings_{i}"" for i in range(7)]: pointer = getattr(pointer, ""weight"") elif m_name == ""kernel"": array = np.transpose(array) try: if not pointer.shape == array.shape: raise ValueError(f""Pointer shape {pointer.shape} and array shape {array.shape} mismatched"") except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f""Initialize PyTorch weight {name}"") # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be # scalar => should first be converted to numpy arrays) if np.isscalar(array): array = np.array(array) pointer.data = torch.from_numpy(array) return model ","def load_tf_weights_in_tapas(model, config, tf_checkpoint_path): """""" Load tf checkpoints in a PyTorch model. This is an adaptation from load_tf_weights_in_bert - add cell selection and aggregation heads - take into account additional token type embedding layers """""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( ""Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "" ""https://www.tensorflow.org/install/ for installation instructions."" ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f""Converting TensorFlow checkpoint from {tf_path}"") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f""Loading TF weight {name} with shape {shape}"") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split(""/"") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v # which are not required for using pretrained model if any( n in [ ""adam_v"", ""adam_m"", ""AdamWeightDecayOptimizer"", ""AdamWeightDecayOptimizer_1"", ""global_step"", ""seq_relationship"", ] for n in name ): logger.info(f""Skipping {'/'.join(name)}"") continue # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights # since these are not used for classification if isinstance(model, TapasForSequenceClassification): if any(n in [""output_bias"", ""output_weights""] for n in name): logger.info(f""Skipping {'/'.join(name)}"") continue # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls # since this model does not have MLM and NSP heads if isinstance(model, TapasModel): if any(n in [""output_bias"", ""output_weights"", ""output_bias_cls"", ""output_weights_cls""] for n in name): logger.info(f""Skipping {'/'.join(name)}"") continue # in case the model is TapasForMaskedLM, we skip the pooler if isinstance(model, TapasForMaskedLM): if any(n in [""pooler""] for n in name): logger.info(f""Skipping {'/'.join(name)}"") continue # if first scope name starts with ""bert"", change it to ""tapas"" if name[0] == ""bert"": name[0] = ""tapas"" pointer = model for m_name in name: if re.fullmatch(r""[A-Za-z]+_\d+"", m_name): scope_names = re.split(r""_(\d+)"", m_name) else: scope_names = [m_name] if scope_names[0] == ""kernel"" or scope_names[0] == ""gamma"": pointer = getattr(pointer, ""weight"") elif scope_names[0] == ""beta"": pointer = getattr(pointer, ""bias"") # cell selection heads elif scope_names[0] == ""output_bias"": if not isinstance(model, TapasForMaskedLM): pointer = getattr(pointer, ""output_bias"") else: pointer = getattr(pointer, ""bias"") elif scope_names[0] == ""output_weights"": 
pointer = getattr(pointer, ""output_weights"") elif scope_names[0] == ""column_output_bias"": pointer = getattr(pointer, ""column_output_bias"") elif scope_names[0] == ""column_output_weights"": pointer = getattr(pointer, ""column_output_weights"") # aggregation head elif scope_names[0] == ""output_bias_agg"": pointer = getattr(pointer, ""aggregation_classifier"") pointer = getattr(pointer, ""bias"") elif scope_names[0] == ""output_weights_agg"": pointer = getattr(pointer, ""aggregation_classifier"") pointer = getattr(pointer, ""weight"") # classification head elif scope_names[0] == ""output_bias_cls"": pointer = getattr(pointer, ""classifier"") pointer = getattr(pointer, ""bias"") elif scope_names[0] == ""output_weights_cls"": pointer = getattr(pointer, ""classifier"") pointer = getattr(pointer, ""weight"") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f""Skipping {'/'.join(name)}"") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == ""_embeddings"": pointer = getattr(pointer, ""weight"") elif m_name[-13:] in [f""_embeddings_{i}"" for i in range(7)]: pointer = getattr(pointer, ""weight"") elif m_name == ""kernel"": array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f""Pointer shape {pointer.shape} and array shape {array.shape} mismatched"") except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f""Initialize PyTorch weight {name}"") # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be # scalar => should first be converted to numpy arrays) if np.isscalar(array): array = np.array(array) pointer.data = torch.from_numpy(array) return model " 24484,"def assert_all_metrics(aggregator, minimun_tags=None, hostname=None): for metric, metric_type in METRICS: aggregator.assert_metric(metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname) minimun_tags = minimun_tags or [] for tag in minimun_tags: aggregator.assert_metric_has_tag(metric, tag) for metric, metric_type in OPTIONAL_METRICS: aggregator.assert_metric( metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname, at_least=0 ) aggregator.assert_all_metrics_covered() ","def assert_all_metrics(aggregator, minimun_tags=None, hostname=None): for metric, metric_type in METRICS: aggregator.assert_metric(metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname) minimun_tags = minimun_tags or [] for tag in minimum_tags: aggregator.assert_metric_has_tag(metric, tag) for metric, metric_type in OPTIONAL_METRICS: aggregator.assert_metric( metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname, at_least=0 ) aggregator.assert_all_metrics_covered() " 54820,"def prob(samples: list, excited_state: list) -> float: r""""""Generate probability of observing a Fock state. 
**Example usage:** >>> excited_state = [0, 2] >>> samples = [[0, 2], [1, 1], [0, 2], [2, 0], [1, 1], [0, 2], [1, 1], [1, 1], [1, 1], [0, 2]] >>> prob(samples, excited_state) 0.4 Args: samples list[list[int]]: a list of samples excited_state (list): a Fock state Returns: float: probability of observing a Fock state in the given samples """""" if len(samples) == 0: raise ValueError(""The samples list must not be empty"") if len(excited_state) == 0: raise ValueError(""The excited state list must not be empty"") if not len(excited_state) == len(samples[0]): raise ValueError(""The number of modes in the samples and the excited state must be equal"") if np.any(np.array(excited_state) < 0): raise ValueError(""The excited state must not contain negative values"") return samples.count(excited_state) / len(samples) ","def prob(samples: list, excited_state: list) -> float: r""""""Estimate probability of observing an excited state. The probability is estimated by calculating the relative frequency of the excited state among the samples. **Example usage:** >>> excited_state = [0, 2] >>> samples = [[0, 2], [1, 1], [0, 2], [2, 0], [1, 1], [0, 2], [1, 1], [1, 1], [1, 1], [0, 2]] >>> prob(samples, excited_state) 0.4 Args: samples list[list[int]]: a list of samples excited_state (list): a Fock state Returns: float: probability of observing a Fock state in the given samples """""" if len(samples) == 0: raise ValueError(""The samples list must not be empty"") if len(excited_state) == 0: raise ValueError(""The excited state list must not be empty"") if not len(excited_state) == len(samples[0]): raise ValueError(""The number of modes in the samples and the excited state must be equal"") if np.any(np.array(excited_state) < 0): raise ValueError(""The excited state must not contain negative values"") return samples.count(excited_state) / len(samples) " 33751,"def _get_resource(container_resources, resource_name, field_name): if field_name not in container_resources: return float(""inf"") resources = container_resources[field_name] matching_keys = [key for key in resources if resource_name in key.lower()] if len(matching_keys) == 0: return float(""inf"") if len(matching_keys) > 2: raise ValueError(f""Multiple {resource_name} types not supported."") resource_key = matching_keys.pop() return _parse_resource(resources[resource_key]) ","def _get_resource(container_resources, resource_name, field_name): if field_name not in container_resources: return float(""inf"") resources = container_resources[field_name] matching_keys = [key for key in resources if resource_name in key.lower()] if len(matching_keys) == 0: return float(""inf"") if len(matching_keys) > 1: raise ValueError(f""Multiple {resource_name} types not supported."") resource_key = matching_keys.pop() return _parse_resource(resources[resource_key]) " 40516,"def imagecopy(source_resource_group_name, source_object_name, target_location, target_resource_group_name, temporary_resource_group_name='image-copy-rg', source_type='image', cleanup='false', parallel_degree=-1, tags=None, target_name=None, target_subscription=None, export_as_snapshot='false', timeout=3600): # get the os disk id from source vm/image logger.warn(""Getting os disk id of the source vm/image"") cli_cmd = prepare_cli_command([source_type, 'show', '--name', source_object_name, '--resource-group', source_resource_group_name]) json_cmd_output = run_cli_command(cli_cmd, return_as_json=True) if json_cmd_output['storageProfile']['dataDisks']: logger.warn( ""Data disks in the source detected, but are 
ignored by this extension!"") source_os_disk_id = None source_os_disk_type = None try: source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id'] if source_os_disk_id is None: raise TypeError source_os_disk_type = ""DISK"" logger.debug(""found %s: %s"", source_os_disk_type, source_os_disk_id) except TypeError: try: source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri'] if source_os_disk_id is None: raise TypeError source_os_disk_type = ""BLOB"" logger.debug(""found %s: %s"", source_os_disk_type, source_os_disk_id) except TypeError: try: # images created by e.g. image-copy extension source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id'] if source_os_disk_id is None: raise TypeError source_os_disk_type = ""SNAPSHOT"" logger.debug(""found %s: %s"", source_os_disk_type, source_os_disk_id) except TypeError: pass if source_os_disk_type is None or source_os_disk_id is None: logger.error( 'Unable to locate a supported os disk type in the provided source object') raise CLIError('Invalid OS Disk Source Type') source_os_type = json_cmd_output['storageProfile']['osDisk']['osType'] logger.debug(""source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s"", source_os_disk_type, source_os_disk_id, source_os_type) # create source snapshots # TODO: skip creating another snapshot when the source is a snapshot logger.warn(""Creating source snapshot"") source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot' cli_cmd = prepare_cli_command(['snapshot', 'create', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name, '--source', source_os_disk_id]) run_cli_command(cli_cmd) # Get SAS URL for the snapshotName logger.warn( ""Getting sas url for the source snapshot with timeout: %d seconds"", timeout) if timeout < 3600: logger.error(""Timeout should be greater than 3600 seconds"") raise CLIError('Invalid Timeout') cli_cmd = prepare_cli_command(['snapshot', 'grant-access', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name, '--duration-in-seconds', str(timeout)]) json_output = run_cli_command(cli_cmd, return_as_json=True) source_os_disk_snapshot_url = json_output['accessSas'] logger.debug(""source os disk snapshot url: %s"", source_os_disk_snapshot_url) # Start processing in the target locations transient_resource_group_name = temporary_resource_group_name # pick the first location for the temp group transient_resource_group_location = target_location[0].strip() create_resource_group(transient_resource_group_name, transient_resource_group_location, target_subscription) target_locations_count = len(target_location) logger.warn(""Target location count: %s"", target_locations_count) create_resource_group(target_resource_group_name, target_location[0].strip(), target_subscription) try: # try to get a handle on arm's 409s azure_pool_frequency = 5 if target_locations_count >= 5: azure_pool_frequency = 15 elif target_locations_count >= 3: azure_pool_frequency = 10 if parallel_degree == -1: pool = Pool(target_locations_count) else: pool = Pool(min(parallel_degree, target_locations_count)) tasks = [] for location in target_location: # Appending location to target name if multiple locations final_target_name = target_name if target_name and target_locations_count > 1: final_target_name = target_name + location location = location.strip() tasks.append((location, transient_resource_group_name, source_type, source_object_name, source_os_disk_snapshot_name, 
source_os_disk_snapshot_url, source_os_type, target_resource_group_name, azure_pool_frequency, tags, final_target_name, target_subscription, export_as_snapshot, timeout)) logger.warn(""Starting async process for all locations"") for task in tasks: pool.apply_async(create_target_image, task) pool.close() pool.join() except KeyboardInterrupt: logger.warn('User cancelled the operation') if cleanup: logger.warn('To cleanup temporary resources look for ones tagged with ""image-copy-extension"". \n' 'You can use the following command: az resource list --tag created_by=image-copy-extension') pool.terminate() return # Cleanup if cleanup: logger.warn('Deleting transient resources') # Delete resource group cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes', '--name', transient_resource_group_name], subscription=target_subscription) run_cli_command(cli_cmd) # Revoke sas for source snapshot cli_cmd = prepare_cli_command(['snapshot', 'revoke-access', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name]) run_cli_command(cli_cmd) # Delete source snapshot # TODO: skip this if source is snapshot and not creating a new one cli_cmd = prepare_cli_command(['snapshot', 'delete', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name]) run_cli_command(cli_cmd) ","def imagecopy(source_resource_group_name, source_object_name, target_location, target_resource_group_name, temporary_resource_group_name='image-copy-rg', source_type='image', cleanup='false', parallel_degree=-1, tags=None, target_name=None, target_subscription=None, export_as_snapshot='false', timeout=3600): # get the os disk id from source vm/image logger.warn(""Getting os disk id of the source vm/image"") cli_cmd = prepare_cli_command([source_type, 'show', '--name', source_object_name, '--resource-group', source_resource_group_name]) json_cmd_output = run_cli_command(cli_cmd, return_as_json=True) if json_cmd_output['storageProfile']['dataDisks']: logger.warn( ""Data disks in the source detected, but are ignored by this extension!"") source_os_disk_id = None source_os_disk_type = None try: source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id'] if source_os_disk_id is None: raise TypeError source_os_disk_type = ""DISK"" logger.debug(""found %s: %s"", source_os_disk_type, source_os_disk_id) except TypeError: try: source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri'] if source_os_disk_id is None: raise TypeError source_os_disk_type = ""BLOB"" logger.debug(""found %s: %s"", source_os_disk_type, source_os_disk_id) except TypeError: try: # images created by e.g. image-copy extension source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id'] if source_os_disk_id is None: raise TypeError source_os_disk_type = ""SNAPSHOT"" logger.debug(""found %s: %s"", source_os_disk_type, source_os_disk_id) except TypeError: pass if source_os_disk_type is None or source_os_disk_id is None: logger.error( 'Unable to locate a supported os disk type in the provided source object') raise CLIError('Invalid OS Disk Source Type') source_os_type = json_cmd_output['storageProfile']['osDisk']['osType'] logger.debug(""source_os_disk_type: %s. source_os_disk_id: %s. 
source_os_type: %s"", source_os_disk_type, source_os_disk_id, source_os_type) # create source snapshots # TODO: skip creating another snapshot when the source is a snapshot logger.warn(""Creating source snapshot"") source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot' cli_cmd = prepare_cli_command(['snapshot', 'create', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name, '--source', source_os_disk_id]) run_cli_command(cli_cmd) # Get SAS URL for the snapshotName logger.warn( ""Getting sas url for the source snapshot with timeout: %d seconds"", timeout) if timeout < 3600: logger.error(""Timeout should be greater than 3600 seconds"") raise CLIError('Invalid Timeout') cli_cmd = prepare_cli_command(['snapshot', 'grant-access', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name, '--duration-in-seconds', str(timeout)]) json_output = run_cli_command(cli_cmd, return_as_json=True) source_os_disk_snapshot_url = json_output['accessSas'] logger.debug(""source os disk snapshot url: %s"", source_os_disk_snapshot_url) # Start processing in the target locations transient_resource_group_name = temporary_resource_group_name # pick the first location for the temp group transient_resource_group_location = target_location[0].strip() create_resource_group(transient_resource_group_name, transient_resource_group_location, target_subscription) target_locations_count = len(target_location) logger.warn(""Target location count: %s"", target_locations_count) create_resource_group(target_resource_group_name, target_location[0].strip(), target_subscription) try: # try to get a handle on arm's 409s azure_pool_frequency = 5 if target_locations_count >= 5: azure_pool_frequency = 15 elif target_locations_count >= 3: azure_pool_frequency = 10 if parallel_degree == -1: pool = Pool(target_locations_count) else: pool = Pool(min(parallel_degree, target_locations_count)) tasks = [] for location in target_location: # Appending location to target name if multiple locations final_target_name = target_name if target_name and target_locations_count > 1: final_target_name = target_name + '-' + location location = location.strip() tasks.append((location, transient_resource_group_name, source_type, source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url, source_os_type, target_resource_group_name, azure_pool_frequency, tags, final_target_name, target_subscription, export_as_snapshot, timeout)) logger.warn(""Starting async process for all locations"") for task in tasks: pool.apply_async(create_target_image, task) pool.close() pool.join() except KeyboardInterrupt: logger.warn('User cancelled the operation') if cleanup: logger.warn('To cleanup temporary resources look for ones tagged with ""image-copy-extension"". 
\n' 'You can use the following command: az resource list --tag created_by=image-copy-extension') pool.terminate() return # Cleanup if cleanup: logger.warn('Deleting transient resources') # Delete resource group cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes', '--name', transient_resource_group_name], subscription=target_subscription) run_cli_command(cli_cmd) # Revoke sas for source snapshot cli_cmd = prepare_cli_command(['snapshot', 'revoke-access', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name]) run_cli_command(cli_cmd) # Delete source snapshot # TODO: skip this if source is snapshot and not creating a new one cli_cmd = prepare_cli_command(['snapshot', 'delete', '--name', source_os_disk_snapshot_name, '--resource-group', source_resource_group_name]) run_cli_command(cli_cmd) " 58082,"def get_object( topology: Topology, object_type: ObjectTypeEnum, device_filter_string: str = None, object_name: str = None, parent: str = None, use_regex: str = None ) -> List[PanosObjectReference]: """"""Searches and returns a reference for the given object type and name. If no name is provided, all objects of the given type are returned. Note this only returns a reference, and not the complete object information. :param topology: `Topology` instance !no-auto-argument :param object_name: The name of the object refernce to return if looking for a specific object. Supports regex if ""use_regex"" is set. :param object_type: The type of object to search; see https://pandevice.readthedocs.io/en/latest/module-objects.html :param device_filter_string: If provided, only objects from the given device are returned. :param parent: The parent vsys or device group to search. if not provided, all will be returned. :param use_regex: Enables regex matching on object name. """""" return ObjectGetter.get_object_reference( topology=topology, device_filter_string=device_filter_string, object_name=object_name, # Fixing the ignore below would rfequire adding union handling to code generation script. object_type=object_type, # type: ignore container_filter=parent, use_regex=use_regex ) ","def get_object( topology: Topology, object_type: ObjectTypeEnum, device_filter_string: Optional[str] = None, object_name: Optional[str] = None, parent: Optional[str] = None, use_regex: Optional[str] = None ) -> List[PanosObjectReference]: """"""Searches and returns a reference for the given object type and name. If no name is provided, all objects of the given type are returned. Note this only returns a reference, and not the complete object information. :param topology: `Topology` instance !no-auto-argument :param object_name: The name of the object refernce to return if looking for a specific object. Supports regex if ""use_regex"" is set. :param object_type: The type of object to search; see https://pandevice.readthedocs.io/en/latest/module-objects.html :param device_filter_string: If provided, only objects from the given device are returned. :param parent: The parent vsys or device group to search. if not provided, all will be returned. :param use_regex: Enables regex matching on object name. """""" return ObjectGetter.get_object_reference( topology=topology, device_filter_string=device_filter_string, object_name=object_name, # Fixing the ignore below would rfequire adding union handling to code generation script. 
object_type=object_type, # type: ignore container_filter=parent, use_regex=use_regex ) " 34415,"def write_temp_file(contents, suffix, mode=""w+"") -> Text: filename = os.path.join(tempfile.gettempdir(), os.urandom(24).hex() + suffix) with open(filename, mode) as f: f.write(contents) f.flush() return filename ","def write_temp_file(contents: AnyStr, suffix: Text, mode: Optional[Text] = ""w+"") -> Text: filename = os.path.join(tempfile.gettempdir(), os.urandom(24).hex() + suffix) with open(filename, mode) as f: f.write(contents) f.flush() return filename " 35969,"def delete_nodes( pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False ): """""" Delete nodes by a list of pks. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the concepts section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. :param pks: a list of the PKs of the nodes to delete :param bool force: do not ask for confirmation to delete nodes. :param int verbosity: 0 prints nothing, 1 prints just sums and total, 2 prints individual nodes. :param bool create_forward: This will delete all output data created by any deleted calculation. :param bool call_calc_forward: This will also delete all calculations called by any workflow that is going to be deleted. Note that when you delete a workflow, also all parent workflows are deleted (recursively). Therefore, setting this flag to True may delete calculations that are 'unrelated' to what has been chosen to be deleted, just because they are connected at some point in the upwards provenance. Use with care, and it is advisable to never combine it with force. :param bool call_work_forward: This will also delete all calculations called by any workflow that is going to be deleted. The same disclaimer as forward_calcs applies here as well. :param bool dry_run: Do not delete, a dry run, with statistics printed according to verbosity levels. :param bool force: Do not ask for confirmation to delete nodes. """""" # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements from aiida.backends.utils import delete_nodes_and_connections from aiida.common import exceptions from aiida.common.links import LinkType from aiida.orm import Node, QueryBuilder, load_node starting_pks = [] for pk in pks: try: load_node(pk) except exceptions.NotExistent: echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk)) else: starting_pks.append(pk) # An empty set might be problematic for the queries done below. 
if not starting_pks: if verbosity: echo.echo('Nothing to delete') return follow_upwards = [] follow_upwards.append(LinkType.CREATE.value) follow_upwards.append(LinkType.RETURN.value) follow_upwards.append(LinkType.CALL_CALC.value) follow_upwards.append(LinkType.CALL_WORK.value) follow_downwards = [] follow_downwards.append(LinkType.INPUT_CALC.value) follow_downwards.append(LinkType.INPUT_WORK.value) if create_forward: follow_downwards.append(LinkType.CREATE.value) if call_calc_forward: follow_downwards.append(LinkType.CALL_CALC.value) if call_work_forward: follow_downwards.append(LinkType.CALL_WORK.value) links_upwards = {'type': {'in': follow_upwards}} links_downwards = {'type': {'in': follow_downwards}} operational_set = set().union(set(starting_pks)) accumulator_set = set().union(set(starting_pks)) while operational_set: new_pks_set = set() query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_downwards, with_incoming='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_upwards, with_outgoing='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) operational_set = new_pks_set.difference(accumulator_set) accumulator_set = new_pks_set.union(accumulator_set) pks_set_to_delete = accumulator_set if verbosity > 0: echo.echo( 'I {} delete {} node{}'.format( 'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else '' ) ) if verbosity > 1: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will')) for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label)) if dry_run: if verbosity > 0: echo.echo('\nThis was a dry run, exiting without deleting anything') return # Asking for user confirmation here if force: pass else: echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete))) if not click.confirm('Shall I continue?'): echo.echo('Exiting without deleting') return # Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access if verbosity > 0: echo.echo('I am starting node deletion.') delete_nodes_and_connections(pks_set_to_delete) if verbosity > 0: echo.echo('I have finished node deletion and I am starting folder deletion.') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) if verbosity > 0: echo.echo('I have finished folder deletion. 
Deletion completed.') ","def delete_nodes( pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False ): """""" Delete nodes by a list of pks. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the concepts section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. :param pks: a list of the PKs of the nodes to delete :param bool force: do not ask for confirmation to delete nodes. :param int verbosity: 0 prints nothing, 1 prints just sums and total, 2 prints individual nodes. :param bool create_forward: This will delete all output data created by any deleted calculation. :param bool call_calc_forward: This will also delete all calculations called by any workflow that is going to be deleted. Note that when you delete a workflow, also all parent workflows are deleted (recursively). Therefore, setting this flag to True may delete calculations that are 'unrelated' to what has been chosen to be deleted, just because they are connected at some point in the upwards provenance. Use with care, and it is advisable to never combine it with force. :param bool call_work_forward: This will also delete all calculations called by any workflow that is going to be deleted. The same disclaimer as forward_calcs applies here as well. :param bool dry_run: Do not delete, a dry run, with statistics printed according to verbosity levels. :param bool force: Do not ask for confirmation to delete nodes. """""" # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements from aiida.backends.utils import delete_nodes_and_connections from aiida.common import exceptions from aiida.common.links import LinkType from aiida.orm import Node, QueryBuilder, load_node starting_pks = [] for pk in pks: try: load_node(pk) except exceptions.NotExistent: echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk)) else: starting_pks.append(pk) # An empty set might be problematic for the queries done below. 
if not starting_pks: if verbosity: echo.echo('Nothing to delete') return follow_upwards = [] follow_upwards.append(LinkType.CREATE.value) follow_upwards.append(LinkType.RETURN.value) follow_upwards.append(LinkType.CALL_CALC.value) follow_upwards.append(LinkType.CALL_WORK.value) follow_downwards = [] follow_downwards.append(LinkType.INPUT_CALC.value) follow_downwards.append(LinkType.INPUT_WORK.value) if create_forward: follow_downwards.append(LinkType.CREATE.value) if call_calc_forward: follow_downwards.append(LinkType.CALL_CALC.value) if call_work_forward: follow_downwards.append(LinkType.CALL_WORK.value) links_upwards = {'type': {'in': follow_upwards}} links_downwards = {'type': {'in': follow_downwards}} operational_set = set().union(set(starting_pks)) accumulator_set = set().union(set(starting_pks)) while operational_set: new_pks_set = set() query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_downwards, with_incoming='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_upwards, with_outgoing='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) operational_set = new_pks_set.difference(accumulator_set) accumulator_set = new_pks_set.union(accumulator_set) pks_set_to_delete = accumulator_set if verbosity > 0: echo.echo( 'I {} delete {} node{}'.format( 'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else '' ) ) if verbosity > 1: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will')) for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label)) if dry_run: if verbosity > 0: echo.echo('\nThis was a dry run, exiting without deleting anything') return # Asking for user confirmation here if force: pass else: echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete))) if not click.confirm('Shall I continue?'): echo.echo('Exiting without deleting') return # Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access if verbosity > 0: echo.echo('Starting node deletion...') delete_nodes_and_connections(pks_set_to_delete) if verbosity > 0: echo.echo('I have finished node deletion and I am starting folder deletion.') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) if verbosity > 0: echo.echo('I have finished folder deletion. Deletion completed.') " 44862,"def bbox_rotate(bbox, angle, rows, cols, interpolation): """"""Rotates a bounding box by angle degrees Args: bbox (tuple): A tuple (x_min, y_min, x_max, y_max). 
angle (int): Angle of rotation rows (int): Image rows. cols (int): Image cols. interpolation (int): interpolation method. return a tuple (x_min, y_min, x_max, y_max) """""" x = np.array([bbox[0], bbox[2], bbox[2], bbox[0]]) y = np.array([bbox[1], bbox[1], bbox[3], bbox[3]]) x = x - 0.5 y = y - 0.5 angle = np.deg2rad(angle) x_t = np.cos(angle) * x + np.sin(angle) * y y_t = -np.sin(angle) * x + np.cos(angle) * y x_t = x_t + 0.5 y_t = y_t + 0.5 return [min(x_t), min(y_t), max(x_t), max(y_t)] ","def bbox_rotate(bbox, angle, rows, cols, interpolation): """"""Rotates a bounding box by angle degrees Args: bbox (tuple): A tuple (x_min, y_min, x_max, y_max). angle (int): Angle of rotation in degrees rows (int): Image rows. cols (int): Image cols. interpolation (int): interpolation method. return a tuple (x_min, y_min, x_max, y_max) """""" x = np.array([bbox[0], bbox[2], bbox[2], bbox[0]]) y = np.array([bbox[1], bbox[1], bbox[3], bbox[3]]) x = x - 0.5 y = y - 0.5 angle = np.deg2rad(angle) x_t = np.cos(angle) * x + np.sin(angle) * y y_t = -np.sin(angle) * x + np.cos(angle) * y x_t = x_t + 0.5 y_t = y_t + 0.5 return [min(x_t), min(y_t), max(x_t), max(y_t)] " 30484,"def main(): try: indicator_data = demisto.args().get(""indicator"") demisto.results(extract_engines_data_from_indicator(indicator_data)) except Exception as e: demisto.debug(f""PostiveDetectionsVSDetectiongEngines failed with [{e}]"") exit_with_message(str(e)) ","def main(): try: indicator_data = demisto.args().get(""indicator"") demisto.results(extract_engines_data_from_indicator(indicator_data)) except Exception as e: demisto.error(f""PostiveDetectionsVSDetectiongEngines failed with [{e}]"") exit_with_message(str(e)) " 23855,"def test_tool_requires_raise_exception_if_exist_both_require_and_build_one(): """""" Testing if same dependency exists in both require and build require (without suffix) """""" client = TestClient() conanfile = textwrap.dedent("""""" from conan import ConanFile class PkgConfigConan(ConanFile): def package_info(self): self.cpp_info.libs = [""libtool""] """""") client.save({""conanfile.py"": conanfile}) client.run(""create . tool/1.0@"") conanfile = textwrap.dedent("""""" from conan import ConanFile from conan.tools.gnu import PkgConfigDeps class PkgConfigConan(ConanFile): name = ""demo"" version = ""1.0"" def requirements(self): self.requires(""tool/1.0"") def build_requirements(self): self.build_requires(""tool/1.0"") def generate(self): tc = PkgConfigDeps(self) tc.build_context_activated = [""tool""] tc.generate() """""") client.save({""conanfile.py"": conanfile}, clean_first=True) with pytest.raises(Exception) as e: client.run(""install . -pr:h default -pr:b default"") assert ""The packages ['tool'] exist both as 'require' and as 'build require'"" in str(e.value) ","def test_tool_requires_raise_exception_if_exist_both_require_and_build_one(): """""" Testing if same dependency exists in both require and build require (without suffix) """""" client = TestClient() conanfile = textwrap.dedent("""""" from conan import ConanFile class PkgConfigConan(ConanFile): def package_info(self): self.cpp_info.libs = [""libtool""] """""") client.save({""conanfile.py"": conanfile}) client.run(""create . 
tool/1.0@"") conanfile = textwrap.dedent("""""" from conan import ConanFile from conan.tools.gnu import PkgConfigDeps class PkgConfigConan(ConanFile): name = ""demo"" version = ""1.0"" def requirements(self): self.requires(""tool/1.0"") def build_requirements(self): self.build_requires(""tool/1.0"") def generate(self): tc = PkgConfigDeps(self) tc.build_context_activated = [""tool""] tc.generate() """""") client.save({""conanfile.py"": conanfile}, clean_first=True) client.run(""install . -pr:h default -pr:b default"", assert_error=True) assert ""The packages ['tool'] exist both as 'require' and as 'build require'"" in str(e.value) " 43790,"def pauli_mult(pauli_1, pauli_2, wire_map=None): """"""Multiply two Pauli words together. Two Pauli operations can be multiplied together by taking the additive OR of their binary symplectic representations. Args: pauli_1 (.Operation): A Pauli word. pauli_2 (.Operation): A Pauli word to multiply with the first one. wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli word as keys, and unique integer labels as their values. If no wire map is provided, the map will be constructed from the set of wires acted on by the input Pauli words. Returns: .Operation: The product of pauli_1 and pauli_2 as a Pauli word (ignoring the global phase). **Example** This function enables multiplication of Pauli group elements at the level of Pauli words, rather than matrices. For example, >>> from pennylane.pauli import pauli_mult >>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1) >>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1) >>> product = pauli_mult(pauli_1, pauli_2) >>> print(product) PauliZ(wires=[0]) """""" # If no wire map is specified, generate one from the union of wires # in both Paulis. if wire_map is None: wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels) wire_map = {label: i for i, label in enumerate(wire_labels)} # Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity if are_identical_pauli_words(pauli_1, pauli_2): first_wire = list(wire_map.keys())[0] return Identity(first_wire) # Compute binary symplectic representations pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map) pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map) bin_symp_1 = np.array([int(x) for x in pauli_1_binary]) bin_symp_2 = np.array([int(x) for x in pauli_2_binary]) # Shorthand for bitwise XOR of numpy arrays pauli_product = bin_symp_1 ^ bin_symp_2 return binary_to_pauli(pauli_product, wire_map=wire_map) ","def pauli_mult(pauli_1, pauli_2, wire_map=None): """"""Multiply two Pauli words together. Two Pauli operations can be multiplied together by taking the additive OR of their binary symplectic representations. Args: pauli_1 (.Operation): A Pauli word. pauli_2 (.Operation): A Pauli word to multiply with the first one. wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli word as keys, and unique integer labels as their values. If no wire map is provided, the map will be constructed from the set of wires acted on by the input Pauli words. Returns: .Operation: The product of pauli_1 and pauli_2 as a Pauli word (ignoring the global phase). **Example** This function enables multiplication of Pauli group elements at the level of Pauli words, rather than matrices. 
For example, >>> from pennylane.grouping import pauli_mult >>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1) >>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1) >>> product = pauli_mult(pauli_1, pauli_2) >>> print(product) PauliZ(wires=[0]) """""" # If no wire map is specified, generate one from the union of wires # in both Paulis. if wire_map is None: wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels) wire_map = {label: i for i, label in enumerate(wire_labels)} # Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity if are_identical_pauli_words(pauli_1, pauli_2): first_wire = list(wire_map.keys())[0] return Identity(first_wire) # Compute binary symplectic representations pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map) pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map) bin_symp_1 = np.array([int(x) for x in pauli_1_binary]) bin_symp_2 = np.array([int(x) for x in pauli_2_binary]) # Shorthand for bitwise XOR of numpy arrays pauli_product = bin_symp_1 ^ bin_symp_2 return binary_to_pauli(pauli_product, wire_map=wire_map) " 4386,"def _check_file(fname, overwrite): """"""Prevent overwrites."""""" if op.isfile(fname) and not overwrite: raise IOError('File %s exists, use --overwrite to overwrite it' % fname) ","def _check_file(fname, overwrite): """"""Prevent overwrites."""""" if op.isfile(fname) and not overwrite: raise IOError(f'File {fname} exists, use --overwrite to overwrite it') " 33950,"def extract_deployments_from_serve_dag( serve_dag_root: DAGNode, ) -> List[Deployment]: """"""Extract deployment python objects from a transformed serve DAG. Should only be called after `transform_ray_dag_to_serve_dag`, otherwise nothing to return. Args: serve_dag_root (DAGNode): Transformed serve dag root node. Returns: List[Deployment]: List of deployment python objects fetched from serve dag. """""" deployments = {} def extractor(dag_node): if isinstance(dag_node, DeploymentNode): deployment = dag_node._body # In case same deployment is used in multiple DAGNodes deployments[deployment.name] = deployment # elif DeploymentMethodNode serve_dag_root._apply_recursive(lambda node: extractor(node)) return list(deployments.values()) ","def extract_deployments_from_serve_dag( serve_dag_root: DAGNode, ) -> List[Deployment]: """"""Extract deployment python objects from a transformed serve DAG. Should only be called after `transform_ray_dag_to_serve_dag`, otherwise nothing to return. Args: serve_dag_root (DAGNode): Transformed serve dag root node. Returns: List[Deployment]: List of deployment python objects fetched from serve dag. """""" deployments = {} def extractor(dag_node): if isinstance(dag_node, DeploymentNode): deployment = dag_node._body # In case same deployment is used in multiple DAGNodes deployments[deployment.name] = deployment # elif DeploymentMethodNode serve_dag_root._apply_recursive(extractor) return list(deployments.values()) " 30511,"def main(): params = {k: v for k, v in demisto.params().items() if v is not None} params['indicator_type'] = FeedIndicatorType.IP params['url'] = 'http://danger.rulez.sk/projects/bruteforceblocker/blist.php' params['ignore_regex'] = ""^#.*"" params['indicator'] = json.dumps({ ""regex"": r""^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"", }) fields = json.dumps({ ""upatedate"": { ""regex"": r""(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"", ""transform"": ""\\1"" } }) params['fields'] = fields # Call the main execution of the HTTP API module. 
feed_main('BruteForceBlocker Feed', params, 'bruteforceblocker-') ","def main(): params = {k: v for k, v in demisto.params().items() if v is not None} params['indicator_type'] = FeedIndicatorType.IP params['url'] = 'http://danger.rulez.sk/projects/bruteforceblocker/blist.php' params['ignore_regex'] = ""^#.*"" params['indicator'] = json.dumps({ ""regex"": r""^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"", }) fields = json.dumps({ ""updatedate"": { ""regex"": r""(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"", ""transform"": ""\\1"" } }) params['fields'] = fields # Call the main execution of the HTTP API module. feed_main('BruteForceBlocker Feed', params, 'bruteforceblocker-') " 28022,"def strip_driveletter(full_path): """"""Removes drive letter from path if it has one """""" _, tail = ntpath.splitdrive(full_path) return tail ","def strip_drive_letter(full_path): """"""Removes drive letter from path if it has one """""" _, tail = ntpath.splitdrive(full_path) return tail " 12896,"def patch_pagination_args(field: DjangoConnectionField): """"""Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. """""" field.args[""first""].description = ""Returns the first n elements from the list."" field.args[""last""].description = ""Returns the last n elements from the list."" field.args[ ""before"" ].description = ( ""Returns the elements in the list that come before the specified cursor."" ) field.args[ ""after"" ].description = ( ""Returns the elements in the list that come after the specified cursor."" ) ","def patch_pagination_args(field: DjangoConnectionField): """"""Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. 
"""""" field.args[""first""].description = ""Returns the first n elements from the list."" field.args[""last""].description = ""Returns the last n elements from the list."" field.args[ ""before"" ].description = ( ""Return the elements in the list that come before the specified cursor."" ) field.args[ ""after"" ].description = ( ""Returns the elements in the list that come after the specified cursor."" ) " 33082,"def test_init_erroneous_property(): """"""Test that properties that raise an exception don’t mess up initialisation"""""" config = {""example_item"": ""test""} skill = _TestSkill(None, config) assert skill.opsdroid is None assert skill.config[""example_item""] == ""test"" ","def test_init_erroneous_property(): """"""Test that properties that raise an exception don’t mess up initialisation."""""" config = {""example_item"": ""test""} skill = _TestSkill(None, config) assert skill.opsdroid is None assert skill.config[""example_item""] == ""test"" " 54461,"def _generate_contour_subplot( trials: List[FrozenTrial], x_param: str, y_param: str, direction: StudyDirection, param_values_range: dict, ) -> Tuple[""Contour"", ""Scatter""]: x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params})) y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params})) if len(x_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(x_param)) return go.Contour(), go.Scatter() if len(y_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(y_param)) return go.Contour(), go.Scatter() x_range = param_values_range[x_param] x_indices = [x_range[0]] + x_indices + [x_range[1]] y_range = param_values_range[y_param] y_indices = [y_range[0]] + y_indices + [y_range[1]] z = [[float(""nan"") for _ in range(len(x_indices))] for _ in range(len(y_indices))] x_values = [] y_values = [] for trial in trials: if x_param not in trial.params or y_param not in trial.params: continue x_values.append(trial.params[x_param]) y_values.append(trial.params[y_param]) x_i = x_indices.index(trial.params[x_param]) y_i = y_indices.index(trial.params[y_param]) if isinstance(trial.value, int): value = float(trial.value) elif isinstance(trial.value, float): value = trial.value else: raise ValueError( ""Trial{} has COMPLETE state, but its value is non-numeric."".format(trial.number) ) z[y_i][x_i] = value # TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed. # If contours_coloring='heatmap' is specified, reversesecale argument of go.Contour does not # work correctly. See https://github.com/pfnet/optuna/issues/606. 
colorscale = plotly.colors.PLOTLY_SCALES[""Blues""] if direction == StudyDirection.MINIMIZE: colorscale = [[1 - t[0], t[1]] for t in colorscale] colorscale.reverse() contour = go.Contour( x=x_indices, y=y_indices, z=z, colorbar={""title"": ""Objective Value""}, colorscale=colorscale, connectgaps=True, contours_coloring=""heatmap"", hoverinfo=""none"", line_smoothing=1.3, ) scatter = go.Scatter( x=x_values, y=y_values, marker={""line"": {""width"": 0.5, ""color"": ""Grey""}, ""color"": ""black""}, mode=""markers"", showlegend=False, ) return (contour, scatter) ","def _generate_contour_subplot( trials: List[FrozenTrial], x_param: str, y_param: str, direction: StudyDirection, param_values_range: dict, ) -> Tuple[""Contour"", ""Scatter""]: x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params})) y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params})) if len(x_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(x_param)) return go.Contour(), go.Scatter() if len(y_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(y_param)) return go.Contour(), go.Scatter() # Padding to the plot. x_range = param_values_range[x_param] x_indices = [x_range[0]] + x_indices + [x_range[1]] y_range = param_values_range[y_param] y_indices = [y_range[0]] + y_indices + [y_range[1]] z = [[float(""nan"") for _ in range(len(x_indices))] for _ in range(len(y_indices))] x_values = [] y_values = [] for trial in trials: if x_param not in trial.params or y_param not in trial.params: continue x_values.append(trial.params[x_param]) y_values.append(trial.params[y_param]) x_i = x_indices.index(trial.params[x_param]) y_i = y_indices.index(trial.params[y_param]) if isinstance(trial.value, int): value = float(trial.value) elif isinstance(trial.value, float): value = trial.value else: raise ValueError( ""Trial{} has COMPLETE state, but its value is non-numeric."".format(trial.number) ) z[y_i][x_i] = value # TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed. # If contours_coloring='heatmap' is specified, reversesecale argument of go.Contour does not # work correctly. See https://github.com/pfnet/optuna/issues/606. colorscale = plotly.colors.PLOTLY_SCALES[""Blues""] if direction == StudyDirection.MINIMIZE: colorscale = [[1 - t[0], t[1]] for t in colorscale] colorscale.reverse() contour = go.Contour( x=x_indices, y=y_indices, z=z, colorbar={""title"": ""Objective Value""}, colorscale=colorscale, connectgaps=True, contours_coloring=""heatmap"", hoverinfo=""none"", line_smoothing=1.3, ) scatter = go.Scatter( x=x_values, y=y_values, marker={""line"": {""width"": 0.5, ""color"": ""Grey""}, ""color"": ""black""}, mode=""markers"", showlegend=False, ) return (contour, scatter) " 52552,"def pick_username_resource(hs: ""HomeServer"") -> Resource: """"""Factory method to generate the username picker resource. This resource gets mounted under /_synapse/client/pick_username. The top-level resource is just a File resource which serves up the static files in the resources ""res"" directory, but it has a couple of children: * ""submit"", which does the mechanics of registering the new user, and redirects the browser back to the client URL * ""check"": checks if a userid is free. """""" # XXX should we make this path customisable so that admins can restyle it? 
base_path = pkg_resources.resource_filename(""synapse"", ""res/username_picker"") res = File(base_path) res.putChild(b""submit"", SubmitResource(hs)) res.putChild(b""check"", AvailabilityCheckResource(hs)) return res ","def pick_username_resource(hs: ""HomeServer"") -> Resource: """"""Factory method to generate the username picker resource. This resource gets mounted under /_synapse/client/pick_username. The top-level resource is just a File resource which serves up the static files in the resources ""res"" directory, but it has a couple of children: * ""submit"", which does the mechanics of registering the new user, and redirects the browser back to the client URL * ""check"": checks if a userid is free. """""" # XXX should we make this path customisable so that admins can restyle it? base_path = pkg_resources.resource_filename(""synapse"", ""res/username_picker"") res = File(base_path) res.putChild(b""submit"", SubmitResource(hs)) res.putChild(b""check"", AvailabilityCheckResource(hs)) return res " 42057,"def create_study(n_objectives, seed): directions = [""minimize"" for _ in range(n_objectives)] sampler_name = sys.argv[1] # Sampler. sampler_cls = getattr( optuna.multi_objective.samplers, sampler_name, getattr(optuna.integration, sampler_name, None), ) if sampler_cls is None: raise ValueError(""Unknown sampler: {}."".format(sampler_name)) # TODO(drumehiron): sampler_kwargs # sampler_kwargs = json.loads(sys.argv[2]) # try: # sampler_kwargs[""seed""] = seed # sampler = sampler_cls(**sampler_kwargs) # except: # del sampler_kwargs[""seed""] # sampler = sampler_cls(**sampler_kwargs) sampler = sampler_cls() return optuna.multi_objective.create_study(directions=directions, sampler=sampler) ","def create_study(n_objectives, seed): directions = [""minimize"" for _ in range(n_objectives)] sampler_name = sys.argv[1] # Sampler. 
sampler_cls = getattr( optuna.samplers, sampler_name, getattr(optuna.integration, sampler_name, None), ) if sampler_cls is None: raise ValueError(""Unknown sampler: {}."".format(sampler_name)) # TODO(drumehiron): sampler_kwargs # sampler_kwargs = json.loads(sys.argv[2]) # try: # sampler_kwargs[""seed""] = seed # sampler = sampler_cls(**sampler_kwargs) # except: # del sampler_kwargs[""seed""] # sampler = sampler_cls(**sampler_kwargs) sampler = sampler_cls() return optuna.multi_objective.create_study(directions=directions, sampler=sampler) " 19998,"def test_plantcv_transform_find_color_card_optional_parameters(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD)) # Test cache directory cache_dir = os.path.join(TEST_TMPDIR, ""test_plantcv_transform_find_color_card"") os.mkdir(cache_dir) pcv.params.debug_outdir = cache_dir # Test with threshold ='normal' df1, start1, space1 = pcv.transform.find_color_card(img=rgb_img, threshold='normal', blurry=True, background='light') _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start1, spacing=space1, nrows=6, ncols=4, exclude=[20, 0]) # Test with threshold='otsu' df2, start2, space2 = pcv.transform.find_color_card(img=rgb_img, threshold='otsu', blurry=True) _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = None pcv.params.debug = None mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220], dtype=np.uint8))) ","def test_plantcv_transform_find_color_card_optional_parameters(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD)) # Test cache directory cache_dir = os.path.join(TEST_TMPDIR, ""test_plantcv_transform_find_color_card"") os.mkdir(cache_dir) pcv.params.debug_outdir = cache_dir # Test with threshold ='normal' df1, start1, space1 = pcv.transform.find_color_card(img=rgb_img, threshold='normal', blurry=True, background='light') _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start1, spacing=space1, nrows=6, ncols=4, exclude=[20, 0]) # Test with threshold='otsu' df2, start2, space2 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold='otsu', blurry=True) _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = None pcv.params.debug = None mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220], dtype=np.uint8))) " 30697,"def get_indicators_command(client: Client) -> COMMAND_OUTPUT: """""" Get indicator form ThreatConnect, Able to change limit and offset by command arguments. Args: client: ThreatConnect client. Returns: str: Human readable. dict: Operation entry context. dict: Operation raw response. 
"""""" raw_response: Iterator[Any] = client.get_indicators( owners=argToList(demisto.getArg('owners') or demisto.getParam('owners')), limit=demisto.getArg('limit'), offset=demisto.getArg('offset')) readable_output: str = tableToMarkdown(name=f""{INTEGRATION_NAME} - Indicators"", t=[parse_indicator(indicator) for indicator in raw_response]) return readable_output, {}, list(raw_response) ","def get_indicators_command(client: Client) -> COMMAND_OUTPUT: """""" Get indicator from ThreatConnect, Able to change limit and offset by command arguments. Args: client: ThreatConnect client. Returns: str: Human readable. dict: Operation entry context. dict: Operation raw response. """""" raw_response: Iterator[Any] = client.get_indicators( owners=argToList(demisto.getArg('owners') or demisto.getParam('owners')), limit=demisto.getArg('limit'), offset=demisto.getArg('offset')) readable_output: str = tableToMarkdown(name=f""{INTEGRATION_NAME} - Indicators"", t=[parse_indicator(indicator) for indicator in raw_response]) return readable_output, {}, list(raw_response) " 52004,"def parse_arguments(s, **kwargs): """""" This method takes a string and parses it as if it were an argument list to a function. It supports both positional and named arguments. Values are automatically converted to int or float if possible. Values surrounded by single or double quotes are treated as strings. Any other value is wrapped in a ""FunctionArgument"" class for later processing. Args: s (str): The string to convert. Returns: (list, dict): A tuple containing a list of arguments (list) and named arguments (dict). """""" global _ARG_ESCAPE_SIGN args_list = [] args_dict = {} # State (general) inside = (False, None) # Are we inside a quoted string? What is the quoted character? skip = False # Skip the current parameter? escape = False # Was the escape key used? is_string = False # Have we been inside a quoted string? temp = """" # Buffer key = None # Key (for named parameter) def _parse_value(temp): ret = temp.strip() if not is_string: try: ret = int(ret) except ValueError: try: ret = float(ret) except ValueError: if ret != """": return FunctionArgument(ret) return ret def _add_value(skip, key, args_list, args_dict, temp): if not skip: # Record value based on whether named parameters mode is set or not. if key is not None: args_dict[key] = _parse_value(temp) key = None else: args_list.append(_parse_value(temp)) for c in s: if c == _ARG_ESCAPE_SIGN: # Escape sign used. if escape: # Already escaping: print escape sign itself. temp += _ARG_ESCAPE_SIGN escape = False else: # Enter escape mode. escape = True elif escape: # Escape mode: print whatever comes after the symbol. escape = False temp += c elif inside[0] is True: # Inside single quotes or double quotes # Wait for the end symbol, allow everything else through, allow escape sign for typing quotes in strings if c == inside[1]: # Leaving single/double quoted area inside = (False, None) else: temp += c elif c == ""\"""" or c == ""'"": # Entering single/double quoted area inside = (True, c) is_string = True continue elif c == ""="": if is_string: # Invalid syntax because we don't allow named parameters to be quoted. return None elif key is None: # Named parameters mode and equals sign encountered. Record key and continue with value. key = temp.strip() temp = """" elif c == "","": # Comma encountered outside of quoted area. _add_value(skip, key, args_list, args_dict, temp) # Reset temp = """" skip = False is_string = False key = None else: # Any other character: add to buffer. 
temp += c if inside[0] is True: # Invalid syntax because we are inside a quoted area. return None else: _add_value(skip, key, args_list, args_dict, temp) return args_list, args_dict ","def parse_arguments(s, **kwargs): """""" This method takes a string and parses it as if it were an argument list to a function. It supports both positional and named arguments. Values are automatically converted to int or float if possible. Values surrounded by single or double quotes are treated as strings. Any other value is wrapped in a ""FunctionArgument"" class for later processing. Args: s (str): The string to convert. Returns: (list, dict): A tuple containing a list of arguments (list) and named arguments (dict). """""" global _ARG_ESCAPE_SIGN args_list = [] args_dict = {} # State (general) inside = (False, None) # Are we inside a quoted string? What is the quoted character? skip = False # Skip the current parameter? escape = False # Was the escape key used? is_string = False # Have we been inside a quoted string? temp = """" # Buffer key = None # Key (for named parameter) def _parse_value(temp): ret = temp.strip() if not is_string: try: ret = int(ret) except ValueError: try: ret = float(ret) except ValueError: if ret != """": return FunctionArgument(ret) return ret def _add_value(skip, key, args_list, args_dict, temp): if not skip: # Record value based on whether named parameters mode is set or not. if key is not None: args_dict[key] = _parse_value(temp) key = None else: args_list.append(_parse_value(temp)) for c in s: if c == _ARG_ESCAPE_SIGN: # Escape sign used. if not escape: # Enter escape mode. escape = True continue # Already escaping: print escape sign itself. temp += _ARG_ESCAPE_SIGN escape = False continue if escape: # Escape mode: print whatever comes after the symbol. escape = False temp += c continue if inside[0]: # Inside single quotes or double quotes # Wait for the end symbol, allow everything else through, allow escape sign for typing quotes in strings if c != inside[1]: temp += c continue # Leaving single/double quoted area inside = (False, None) continue if c == '""' or c == ""'"": # Entering single/double quoted area inside = (True, c) is_string = True continue if c == ""="": if is_string: # Invalid syntax because we don't allow named parameters to be quoted. return None if key is None: # Named parameters mode and equals sign encountered. Record key and continue with value. key = temp.strip() temp = """" continue if c == "","": # Comma encountered outside of quoted area. _add_value(skip, key, args_list, args_dict, temp) # Reset temp = """" skip = False is_string = False key = None continue # Any other character: add to buffer. temp += c if inside[0]: # Invalid syntax because we are inside a quoted area. 
return None _add_value(skip, key, args_list, args_dict, temp) return args_list, args_dict " 58376,"def _make_passthrough_contrast(level, contrast_names, model_type='meta', test=""t""): gb_dict = dict(Subject=['subject', 'contrast'], Session=['session', 'contrast'], Dataset=['contrast']) block = OrderedDict(Level=level, Name=level, GroupBy=gb_dict[level], Model={'Type': model_type, 'X': contrast_names}) contrasts = [] for cn in contrast_names: cdict = OrderedDict(Name=level.lower() + ""_"" + cn, ConditionList=[cn], Weights=[1], Test=test) contrasts.append(cdict) block[""Contrasts""] = contrasts return block ","def _make_passthrough_contrast(level, contrast_names, model_type='meta', test=""t""): gb_dict = dict(Subject=['subject', 'contrast'], Session=['session', 'contrast'], Dataset=['contrast']) block = dict(Level=level, Name=level, GroupBy=gb_dict[level], Model={'Type': model_type, 'X': contrast_names}) contrasts = [] for cn in contrast_names: cdict = OrderedDict(Name=level.lower() + ""_"" + cn, ConditionList=[cn], Weights=[1], Test=test) contrasts.append(cdict) block[""Contrasts""] = contrasts return block " 45450,"def iscoroutinefunction_or_partial(obj: typing.Any) -> bool: """""" Correctly determines if an object is a coroutine function, by unwrapping functools.partial objects. """""" while isinstance(obj, functools.partial): obj = obj.func return inspect.iscoroutinefunction(obj) ","def iscoroutinefunction_or_partial(obj: typing.Any) -> bool: """""" Correctly determines if an object is a coroutine function, including those wrapped in functools.partial objects. """""" while isinstance(obj, functools.partial): obj = obj.func return inspect.iscoroutinefunction(obj) " 48802,"def validate_group_key(k: str, max_length: int = 200) -> bool: """"""Validates value used as a group key."""""" if not isinstance(k, str): raise TypeError(f""The key has to be a string and is {type(k)}:{k}"") elif len(k) > max_length: raise AirflowException(f""The key has to be less than {max_length} characters"") elif not GROUP_KEY_REGEX.match(k): raise AirflowException( ""The key ({k}) has to be made of alphanumeric characters, dashes "" ""and underscores exclusively"".format(k=k) ) else: return True ","def validate_group_key(k: str, max_length: int = 200) -> bool: """"""Validates value used as a group key."""""" if not isinstance(k, str): raise TypeError(f""The key has to be a string and is {type(k)}:{k}"") elif len(k) > max_length: raise AirflowException(f""The key has to be less than {max_length} characters"") elif not GROUP_KEY_REGEX.match(k): raise AirflowException( f""The key ({k}) has to be made of alphanumeric characters, dashes "" ""and underscores exclusively"" ) else: return True " 36673,"def getproxies_environment(): """"""Return a dictionary of scheme -> proxy server URL mappings. Scan the environment for variables named _proxy; this seems to be the standard convention. If you need a different way, you can pass a proxies dictionary to the [Fancy]URLopener constructor. 
"""""" # in order to prefer lowercase variables, process environment in # two passes: first matches any, second pass matches lowercase only # select only environment variables which end in (after making lowercase) _proxy candidate_names = [name for name in os.environ.keys() if len(name)>5 and name[-6]=='_'] # fast selection of candidates environment = [(name, os.environ[name], name.lower()) for name in candidate_names if name[-6:].lower()=='_proxy'] proxies = {} for name, value, name_lower in environment: if value and name_lower[-6:] == '_proxy': proxies[name_lower[:-6]] = value # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY # (non-all-lowercase) as it may be set from the web server by a ""Proxy:"" # header from the client # If ""proxy"" is lowercase, it will still be used thanks to the next block if 'REQUEST_METHOD' in os.environ: proxies.pop('http', None) for name, value, name_lower in environment: if name[-6:] == '_proxy': if value: proxies[name_lower[:-6]] = value else: proxies.pop(name_lower[:-6], None) return proxies ","def getproxies_environment(): """"""Return a dictionary of scheme -> proxy server URL mappings. Scan the environment for variables named _proxy; this seems to be the standard convention. If you need a different way, you can pass a proxies dictionary to the [Fancy]URLopener constructor. """""" # in order to prefer lowercase variables, process environment in # two passes: first matches any, second pass matches lowercase only # select only environment variables which end in (after making lowercase) _proxy candidate_names = [name for name in os.environ.keys() if len(name)>5 and name[-6]=='_'] # fast selection of candidates environment = [(name, os.environ[name], name.lower()) for name in candidate_names if name[-6:].lower()=='_proxy'] proxies = {} for name, value, name_lower in environment: if value and name_lower[-6:] == '_proxy': proxies[name_lower[:-6]] = value # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY # (non-all-lowercase) as it may be set from the web server by a ""Proxy:"" # header from the client # If ""proxy"" is lowercase, it will still be used thanks to the next block if 'REQUEST_METHOD' in os.environ: proxies.pop('http', None) for name, value, proxy_name in environment: if name[-6:] == '_proxy': if value: proxies[name_lower[:-6]] = value else: proxies.pop(name_lower[:-6], None) return proxies " 41889,"def _generate_contour_subplot( trials: List[FrozenTrial], x_param: str, y_param: str, direction: StudyDirection, param_values_range: dict, ) -> Tuple[""Contour"", ""Scatter""]: x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params})) y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params})) if len(x_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(x_param)) return go.Contour(), go.Scatter() if len(y_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(y_param)) return go.Contour(), go.Scatter() x_range = param_values_range[x_param] x_indices = [x_range[0]] + x_indices + [x_range[1]] y_range = param_values_range[y_param] y_indices = [y_range[0]] + y_indices + [y_range[1]] z = [[float(""nan"") for _ in range(len(x_indices))] for _ in range(len(y_indices))] x_values = [] y_values = [] for trial in trials: if x_param not in trial.params or y_param not in trial.params: continue x_values.append(trial.params[x_param]) y_values.append(trial.params[y_param]) x_i = 
x_indices.index(trial.params[x_param]) y_i = y_indices.index(trial.params[y_param]) if isinstance(trial.value, int): value = float(trial.value) elif isinstance(trial.value, float): value = trial.value else: raise ValueError( ""Trial{} has COMPLETE state, but its value is non-numeric."".format(trial.number) ) z[y_i][x_i] = value # TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed. # If contours_coloring='heatmap' is specified, reversesecale argument of go.Contour does not # work correctly. See https://github.com/pfnet/optuna/issues/606. colorscale = plotly.colors.PLOTLY_SCALES[""Blues""] if direction == StudyDirection.MINIMIZE: colorscale = [[1 - t[0], t[1]] for t in colorscale] colorscale.reverse() contour = go.Contour( x=x_indices, y=y_indices, z=z, colorbar={""title"": ""Objective Value""}, colorscale=colorscale, connectgaps=True, contours_coloring=""heatmap"", hoverinfo=""none"", line_smoothing=1.3, ) scatter = go.Scatter( x=x_values, y=y_values, marker={""line"": {""width"": 0.5, ""color"": ""Grey""}, ""color"": ""black""}, mode=""markers"", showlegend=False, ) return (contour, scatter) ","def _generate_contour_subplot( trials: List[FrozenTrial], x_param: str, y_param: str, direction: StudyDirection, param_values_range: Dict[str, Tuple[float, float]], ) -> Tuple[""Contour"", ""Scatter""]: x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params})) y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params})) if len(x_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(x_param)) return go.Contour(), go.Scatter() if len(y_indices) < 2: _logger.warning(""Param {} unique value length is less than 2."".format(y_param)) return go.Contour(), go.Scatter() x_range = param_values_range[x_param] x_indices = [x_range[0]] + x_indices + [x_range[1]] y_range = param_values_range[y_param] y_indices = [y_range[0]] + y_indices + [y_range[1]] z = [[float(""nan"") for _ in range(len(x_indices))] for _ in range(len(y_indices))] x_values = [] y_values = [] for trial in trials: if x_param not in trial.params or y_param not in trial.params: continue x_values.append(trial.params[x_param]) y_values.append(trial.params[y_param]) x_i = x_indices.index(trial.params[x_param]) y_i = y_indices.index(trial.params[y_param]) if isinstance(trial.value, int): value = float(trial.value) elif isinstance(trial.value, float): value = trial.value else: raise ValueError( ""Trial{} has COMPLETE state, but its value is non-numeric."".format(trial.number) ) z[y_i][x_i] = value # TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed. # If contours_coloring='heatmap' is specified, reversesecale argument of go.Contour does not # work correctly. See https://github.com/pfnet/optuna/issues/606. 
colorscale = plotly.colors.PLOTLY_SCALES[""Blues""] if direction == StudyDirection.MINIMIZE: colorscale = [[1 - t[0], t[1]] for t in colorscale] colorscale.reverse() contour = go.Contour( x=x_indices, y=y_indices, z=z, colorbar={""title"": ""Objective Value""}, colorscale=colorscale, connectgaps=True, contours_coloring=""heatmap"", hoverinfo=""none"", line_smoothing=1.3, ) scatter = go.Scatter( x=x_values, y=y_values, marker={""line"": {""width"": 0.5, ""color"": ""Grey""}, ""color"": ""black""}, mode=""markers"", showlegend=False, ) return (contour, scatter) " 50307,"def test_update_save_project_release(project, release): release = project.releases.get(release_tag_name) release.description = release_description + ""updated"" release.save() assert project.releases.get(release_tag_name) assert release.tag_name == release_tag_name assert release.description == release_description + ""updated"" ","def test_update_save_project_release(project, release): updated_description = f""{release.description} updated"" release.description = updated_description release.save() release = project.releases.get(release.tag_name) assert release.description == updated_description " 57846,"def reverse_dig_result(server: str, name: str): try: if server: server = f""@{server}"" dig_output = subprocess.check_output( ['dig', server, '+answer', '-x', name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find PTR record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=True) return {""name"": name, ""resolveddomain"": resolved_addresses, ""nameserver"": dns_server} else: dig_output = subprocess.check_output( ['dig', '+answer', '-x', name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find PTR record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=True) return {""name"": name, ""resolveddomain"": resolved_addresses, ""nameserver"": dns_server} except Exception as e: if isinstance(e, subprocess.CalledProcessError): msg = e.output # pylint: disable=no-member else: msg = str(e) return_error(msg) ","def reverse_dig_result(server: str, name: str): try: if server: server = f""@{server}"" dig_output = subprocess.check_output( ['dig', server, '+answer', '-x', name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find PTR record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=True) return {""name"": name, ""resolveddomain"": resolved_addresses, ""nameserver"": dns_server} else: dig_output = subprocess.check_output( ['dig', '+answer', '-x', name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find PTR record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=True) return {""name"": name, ""resolveddomain"": resolved_addresses, ""nameserver"": dns_server} except subprocess.CalledProcessError as e: return_error(e.output) " 32182,"def get_notes_for_alert( client: Client, investigation: Dict, alert: Dict, last_update: datetime, update_status: bool, ): """""" Retrieve any comments/attachments as XSOAR entries """""" alert_id = str(alert[""id""]) entries = [] comments = get_comments_for_alert(client, alert_id, last_update) entries.extend(comments_to_notes(client, comments)) 
entries = sorted(entries, key=lambda x: x[""sort""] if ""sort"" in x else """") # Remove sort field from entries now that they are sorted correctly for entry in entries: entry.pop(""sort"") # Times for syncing local_last_closed = get_last_closed(investigation) local_last_reopened = get_last_reopened(investigation) remote_last_closed = get_alert_last_closed(alert) remote_last_reopened = get_alert_last_reopened(alert) if ( update_status and alert[""status""] == ""closed"" and client.close_incident and remote_last_closed > local_last_reopened ): # Use the last comment as a close comment if comments: last_comment = comments[-1][""comment""] else: last_comment = """" close_reason = ZTAP_STATUS_TO_XSOAR.get(alert[""review_outcome""], ""Other"") entries.append( { ""Type"": EntryType.NOTE, ""Contents"": { ""dbotIncidentClose"": True, ""closeReason"": close_reason, ""closeNotes"": f""From ZTAP: {last_comment}"", }, ""ContentsFormat"": EntryFormat.JSON, } ) demisto.info(f""Closing incident from ZTAP {alert_id}"") if ( update_status and alert[""status""] != ""closed"" and remote_last_reopened > local_last_closed and client.reopen_incident ): entries.append( { ""Type"": EntryType.NOTE, ""Contents"": { ""dbotIncidentReopen"": True, }, ""ContentsFormat"": EntryFormat.JSON, } ) demisto.info(f""Reopening incident from ZTAP {alert_id}"") return entries ","def get_notes_for_alert( client: Client, investigation: Dict, alert: Dict, last_update: datetime, update_status: bool, ): """""" Retrieve any comments/attachments as XSOAR entries """""" alert_id = str(alert[""id""]) entries = [] comments = get_comments_for_alert(client, alert_id, last_update) entries.extend(comments_to_notes(client, comments)) entries = sorted(entries, key=lambda x: x.get(""sort"", """")) # Remove sort field from entries now that they are sorted correctly for entry in entries: entry.pop(""sort"") # Times for syncing local_last_closed = get_last_closed(investigation) local_last_reopened = get_last_reopened(investigation) remote_last_closed = get_alert_last_closed(alert) remote_last_reopened = get_alert_last_reopened(alert) if ( update_status and alert[""status""] == ""closed"" and client.close_incident and remote_last_closed > local_last_reopened ): # Use the last comment as a close comment if comments: last_comment = comments[-1][""comment""] else: last_comment = """" close_reason = ZTAP_STATUS_TO_XSOAR.get(alert[""review_outcome""], ""Other"") entries.append( { ""Type"": EntryType.NOTE, ""Contents"": { ""dbotIncidentClose"": True, ""closeReason"": close_reason, ""closeNotes"": f""From ZTAP: {last_comment}"", }, ""ContentsFormat"": EntryFormat.JSON, } ) demisto.info(f""Closing incident from ZTAP {alert_id}"") if ( update_status and alert[""status""] != ""closed"" and remote_last_reopened > local_last_closed and client.reopen_incident ): entries.append( { ""Type"": EntryType.NOTE, ""Contents"": { ""dbotIncidentReopen"": True, }, ""ContentsFormat"": EntryFormat.JSON, } ) demisto.info(f""Reopening incident from ZTAP {alert_id}"") return entries " 45705,"def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. 
The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool, optional If True, measure, print and return the computation time. 
Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast:"") print(""------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (vil.shape[1], vil.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""FFT: %s"" % fft_method) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the ARI(p,1) model: %d"" % ar_order) if type(ar_window_radius) == int: print(""ARI(p,1) window radius: %d"" % ar_window_radius) else: print(""ARI(p,1) window radius: none"") print(""R(VIL) window radius: %d"" % r_vil_window_radius) if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, allow_nonfinite_values=True, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # compute time-lagged correlation coefficients for the cascade levels of # the advected 
and differenced input fields gamma = np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() r_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" if rainrate is not None: r_f_prev = r_vil_a * vil[-1, :] + r_vil_b else: r_f_prev = vil[-1, :] extrap_kwargs[""return_displacement""] = True dp = None t_nowcast = 0 t_prev = 0.0 for t in range(len(timesteps)): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... 
"" % (t_nowcast + 1), end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() # iterate the ARI models for each cascade level for i in range(n_cascade_levels): vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i]) # recompose the cascade to obtain the forecast field vil_dec_dict = {} vil_dec_dict[""cascade_levels""] = vil_dec[:, -1, :] vil_dec_dict[""domain""] = ""spatial"" vil_dec_dict[""normalized""] = False vil_f = recomp_method(vil_dec_dict) vil_f[~mask] = np.nan if rainrate is not None: # convert VIL to rain rate r_f_new = r_vil_a * vil_f + r_vil_b else: r_f_new = vil_f if apply_rainrate_mask: r_f_new[rainrate_mask] = 0.0 r_f_new[r_f_new < 0.0] = 0.0 # advect the recomposed field to obtain the forecast for the current # time step (or subtimesteps if non-integer time steps are given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: r_f_ip = ( 1.0 - t_diff_prev_int ) * r_f_prev + t_diff_prev_int * r_f_new else: r_f_ip = r_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = dp r_f_ep, dp = extrapolator( r_f_ip, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) r_f.append(r_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if len(subtimesteps) == 0: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = dp _, dp = extrapolator( None, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) t_prev = t + 1 r_f_prev = r_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop if measure_time: return np.stack(r_f), init_time, mainloop_time else: return np.stack(r_f) ","def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. 
timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool, optional If True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). 
References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast:"") print(""------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (vil.shape[1], vil.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""FFT: %s"" % fft_method) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the ARI(p,1) model: %d"" % ar_order) if type(ar_window_radius) == int: print(""ARI(p,1) window radius: %d"" % ar_window_radius) else: print(""ARI(p,1) window radius: none"") print(""R(VIL) window radius: %d"" % r_vil_window_radius) if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, allow_nonfinite_values=True, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # compute time-lagged correlation coefficients for the cascade levels of # the advected and differenced input fields gamma = np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the 
resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() r_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" if rainrate is not None: r_f_prev = r_vil_a * vil[-1, :] + r_vil_b else: r_f_prev = vil[-1, :] extrap_kwargs[""return_displacement""] = True dp = None t_nowcast = 0 t_prev = 0.0 for t in range(len(timesteps)): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... "" % t, end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() # iterate the ARI models for each cascade level for i in range(n_cascade_levels): vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i]) # recompose the cascade to obtain the forecast field vil_dec_dict = {} vil_dec_dict[""cascade_levels""] = vil_dec[:, -1, :] vil_dec_dict[""domain""] = ""spatial"" vil_dec_dict[""normalized""] = False vil_f = recomp_method(vil_dec_dict) vil_f[~mask] = np.nan if rainrate is not None: # convert VIL to rain rate r_f_new = r_vil_a * vil_f + r_vil_b else: r_f_new = vil_f if apply_rainrate_mask: r_f_new[rainrate_mask] = 0.0 r_f_new[r_f_new < 0.0] = 0.0 # advect the recomposed field to obtain the forecast for the current # time step (or subtimesteps if non-integer time steps are given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: r_f_ip = ( 1.0 - t_diff_prev_int ) * r_f_prev + t_diff_prev_int * r_f_new else: r_f_ip = r_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = dp r_f_ep, dp = extrapolator( r_f_ip, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) r_f.append(r_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if len(subtimesteps) == 0: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = dp _, dp = extrapolator( None, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) t_prev = t + 1 r_f_prev = r_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop if measure_time: return np.stack(r_f), init_time, mainloop_time else: return np.stack(r_f) " 55331,"def run_v2_pipeline( client: kfp.Client, fn: Callable, driver_image: str, launcher_v2_image: str, pipeline_root: str, enable_caching: bool, arguments: Mapping[str, str], ): import tempfile import subprocess original_pipeline_spec = tempfile.mktemp( suffix='.json', prefix=""original_pipeline_spec"") 
kfp.v2.compiler.Compiler().compile( pipeline_func=fn, package_path=original_pipeline_spec) # remove following overriding logic once we use create_run_from_job_spec to trigger kfp pipeline run with open(original_pipeline_spec) as f: pipeline_job_dict = { 'pipelineSpec': json.load(f), 'runtimeConfig': {}, } for component in [pipeline_job_dict['pipelineSpec']['root']] + list( pipeline_job_dict['pipelineSpec']['components'].values()): if 'dag' in component: for task in component['dag']['tasks'].values(): task['cachingOptions'] = {'enableCache': enable_caching} for input_name, input_spec in pipeline_job_dict['pipelineSpec']['root'].get( 'inputDefinitions', {}).get('parameters', {}).items(): if 'defaultValue' in input_spec: if 'parameterValues' not in pipeline_job_dict['runtimeConfig']: pipeline_job_dict['runtimeConfig']['parameterValues'] = {} pipeline_job_dict['runtimeConfig']['parameterValues'][ input_name] = input_spec['defaultValue'] for k, v in arguments.items(): parameter_value = pipeline_job_dict['runtimeConfig']['parameterValues'][ k] pipeline_job_dict['runtimeConfig']['parameterValues'][ k] = parameter_value pipeline_job = tempfile.mktemp(suffix='.json', prefix=""pipeline_job"") with open(pipeline_job, 'w') as f: json.dump(pipeline_job_dict, f) argo_workflow_spec = tempfile.mktemp(suffix='.yaml') with open(argo_workflow_spec, 'w') as f: args = [ 'kfp-v2-compiler', '--spec', pipeline_job, '--driver', driver_image, '--launcher', launcher_v2_image, '--pipeline_root', pipeline_root, ] # call v2 backend compiler CLI to compile pipeline spec to argo workflow subprocess.check_call(args, stdout=f) return client.create_run_from_pipeline_package( pipeline_file=argo_workflow_spec, arguments={}, enable_caching=enable_caching) ","def run_v2_pipeline( client: kfp.Client, fn: Callable, driver_image: str, launcher_v2_image: str, pipeline_root: str, enable_caching: bool, arguments: Mapping[str, str], ): import tempfile import subprocess original_pipeline_spec = tempfile.mktemp( suffix='.json', prefix=""original_pipeline_spec"") kfp.v2.compiler.Compiler().compile( pipeline_func=fn, package_path=original_pipeline_spec) # remove following overriding logic once we use create_run_from_job_spec to trigger kfp pipeline run with open(original_pipeline_spec) as f: pipeline_job_dict = { 'pipelineSpec': json.load(f), 'runtimeConfig': {}, } for component in [pipeline_job_dict['pipelineSpec']['root']] + list( pipeline_job_dict['pipelineSpec']['components'].values()): if 'dag' in component: for task in component['dag']['tasks'].values(): task['cachingOptions'] = {'enableCache': enable_caching} for input_name, input_spec in pipeline_job_dict['pipelineSpec']['root'].get( 'inputDefinitions', {}).get('parameters', {}).items(): if 'defaultValue' in input_spec: if 'parameterValues' not in pipeline_job_dict['runtimeConfig']: pipeline_job_dict['runtimeConfig']['parameterValues'] = {} pipeline_job_dict['runtimeConfig']['parameterValues'][ input_name] = input_spec['defaultValue'] for k, v in arguments.items(): pipeline_job_dict['runtimeConfig']['parameterValues'][ k] = v pipeline_job = tempfile.mktemp(suffix='.json', prefix=""pipeline_job"") with open(pipeline_job, 'w') as f: json.dump(pipeline_job_dict, f) argo_workflow_spec = tempfile.mktemp(suffix='.yaml') with open(argo_workflow_spec, 'w') as f: args = [ 'kfp-v2-compiler', '--spec', pipeline_job, '--driver', driver_image, '--launcher', launcher_v2_image, '--pipeline_root', pipeline_root, ] # call v2 backend compiler CLI to compile pipeline spec to argo workflow 
subprocess.check_call(args, stdout=f) return client.create_run_from_pipeline_package( pipeline_file=argo_workflow_spec, arguments={}, enable_caching=enable_caching) " 1572,"def load_wine(return_X_y=False, as_frame=False): """"""Load and return the wine dataset (classification). .. versionadded:: 0.18 The wine dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class [59,71,48] Samples total 178 Dimensionality 13 Features real, positive ================= ============== Read more in the :ref:`User Guide `. Parameters ---------- return_X_y : boolean, default=False. If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. as_frame : boolean, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target_columns. .. versionadded:: 0.23 Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'target_names', the meaning of the labels, 'feature_names', the meaning of the features, and 'DESCR', the full description of the dataset. (data, target) : tuple if ``return_X_y`` is True frame : pandas DataFrame Only present when `as_frame=True`. DataFrame with ``data`` and ``target``. .. versionadded:: 0.23 The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit standard format from: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data Examples -------- Let's say you are interested in the samples 10, 80, and 140, and want to know their class name. >>> from sklearn.datasets import load_wine >>> data = load_wine() >>> data.target[[10, 80, 140]] array([0, 1, 2]) >>> list(data.target_names) ['class_0', 'class_1', 'class_2'] """""" module_path = dirname(__file__) data, target, target_names = load_data(module_path, 'wine_data.csv') with open(join(module_path, 'descr', 'wine_data.rst')) as rst_file: fdescr = rst_file.read() feature_names = ['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity', 'hue', 'od280/od315_of_diluted_wines', 'proline'] frame = None target_columns = ['target', ] if as_frame: frame, data, target = _convert_data_dataframe(""load_wine"", data, target, feature_names, target_columns) if return_X_y: return data, target return Bunch(data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names) ","def load_wine(return_X_y=False, as_frame=False): """"""Load and return the wine dataset (classification). .. versionadded:: 0.18 The wine dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class [59,71,48] Samples total 178 Dimensionality 13 Features real, positive ================= ============== Read more in the :ref:`User Guide `. Parameters ---------- return_X_y : boolean, default=False. If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. as_frame : boolean, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target columns. .. 
versionadded:: 0.23 Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'target_names', the meaning of the labels, 'feature_names', the meaning of the features, and 'DESCR', the full description of the dataset. (data, target) : tuple if ``return_X_y`` is True frame : pandas DataFrame Only present when `as_frame=True`. DataFrame with ``data`` and ``target``. .. versionadded:: 0.23 The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit standard format from: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data Examples -------- Let's say you are interested in the samples 10, 80, and 140, and want to know their class name. >>> from sklearn.datasets import load_wine >>> data = load_wine() >>> data.target[[10, 80, 140]] array([0, 1, 2]) >>> list(data.target_names) ['class_0', 'class_1', 'class_2'] """""" module_path = dirname(__file__) data, target, target_names = load_data(module_path, 'wine_data.csv') with open(join(module_path, 'descr', 'wine_data.rst')) as rst_file: fdescr = rst_file.read() feature_names = ['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity', 'hue', 'od280/od315_of_diluted_wines', 'proline'] frame = None target_columns = ['target', ] if as_frame: frame, data, target = _convert_data_dataframe(""load_wine"", data, target, feature_names, target_columns) if return_X_y: return data, target return Bunch(data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names) " 45,"def collapse_multiple_space(s): return MULTIPLE_SPACES_RE.sub(' ', s) ","def collapse_multiple_space(s): return MULTIPLE_SPACES_RE.sub(r' ', s) " 48430,"def run(module, result): """"""compares config against passed configuration to asnible to create an update list and applies to the edgeos device. .. warning:: docstring added long after code written, requires verification of Arguments and Returns - please update if you see any errors :param module: ansible module for self ref :type module: ansible.module :param result: result dict to be populated process :type result: dict """""" # get the current active config from the node or passed in via # the config param config = module.params['config'] or get_config(module) # create the candidate config object from the arguments candidate = get_candidate(module) # create loadable config from updates, also return unmanaged and invalid # commands updates, unmanaged_config, invalid_commands = diff_config(module, candidate, config) result['commands'] = sorted(updates) result['unmanaged'] = sorted(unmanaged_config) result['invalid'] = sorted(invalid_commands) commit = not module.check_mode comment = module.params['comment'] if result.get('commands'): load_config(module, updates, commit=commit, comment=comment) result['changed'] = True if result.get('unmanaged'): result['warnings'].append('Some configuration commands were ' 'unmanaged, review unmanaged list') if result.get('invalid'): result['warnings'].append('Some configuration commands were ' 'invalid, review invalid list') ","def run(module, result): """"""compares config against passed configuration to asnible to create an update list and applies to the edgeos device. .. 
warning:: docstring added long after code written, requires verification of Arguments and Returns - please update if you see any errors :param module: ansible module for self ref :type module: ansible.module :param result: result dict to be populated process :type result: dict """""" # get the current active config from the node or passed in via # the config param config = module.params['config'] or get_config(module) # create the candidate config object from the arguments candidate = get_candidate(module) # create loadable config from updates, also return unmanaged and invalid # commands updates, unmanaged_config, invalid_commands = diff_config(module, candidate, config) result['commands'] = sorted(updates) result['unmanaged'] = sorted(unmanaged_config) result['invalid'] = sorted(invalid_commands) commit = not module.check_mode comment = module.params['comment'] if result.get('commands'): load_config(module, updates, commit=commit, comment=comment) result['changed'] = True if result.get('unmanaged'): result['warnings'].append('Some configuration commands were unmanaged, review unmanaged list') if result.get('invalid'): result['warnings'].append('Some configuration commands were invalid, review invalid list') " 6374,"def set_caller_information(doc, state): '''Called from hooks on creation of Lead or Contact''' if doc.doctype not in ['Lead', 'Contact']: return numbers = [doc.get('phone'), doc.get('mobile_no')] # contact for Contact and lead for Lead fieldname = doc.doctype.lower() # contact_name or lead_name display_name_field = '{}_name'.format(fieldname) # Contact now has all the nos saved in child table if doc.doctype == 'Contact': numbers = [nos.phone for nos in doc.phone_nos] for number in numbers: number = strip_number(number) if not number: continue filters = frappe._dict({ 'from': ['like', '%{}'.format(number)], fieldname: '' }) logs = frappe.get_all('Call Log', filters=filters) for log in logs: frappe.db.set_value('Call Log', log.name, { fieldname: doc.name, display_name_field: doc.get_title() }, update_modified=False) ","def set_caller_information(doc, state): '''Called from hooks on creation of Lead or Contact''' if doc.doctype not in ['Lead', 'Contact']: return numbers = [doc.get('phone'), doc.get('mobile_no')] # contact for Contact and lead for Lead fieldname = doc.doctype.lower() # contact_name or lead_name display_name_field = '{}_name'.format(fieldname) # Contact now has all the nos saved in child table if doc.doctype == 'Contact': numbers = [d.phone for d in doc.phone_nos] for number in numbers: number = strip_number(number) if not number: continue filters = frappe._dict({ 'from': ['like', '%{}'.format(number)], fieldname: '' }) logs = frappe.get_all('Call Log', filters=filters) for log in logs: frappe.db.set_value('Call Log', log.name, { fieldname: doc.name, display_name_field: doc.get_title() }, update_modified=False) " 22771,"def _report_next_steps(config: interfaces.IConfig, installer_err: Optional[errors.Error], lineage: Optional[storage.RenewableCert], new_or_renewed_cert: bool = True) -> None: """"""Displays post-run/certonly advice to the user about renewal and installation. The output varies by runtime configuration and any errors encountered during installation. :param config: Configuration object :type config: interfaces.IConfig :param installer_err: The installer/enhancement error encountered, if any. :type error: Optional[errors.Error] :param lineage: The resulting certificate lineage from the issuance, if any. 
:type lineage: Optional[storage.RenewableCert] :param bool new_or_renewed_cert: Whether the verb execution resulted in a certificate being saved (created or renewed). """""" steps: List[str] = [] # If the installation or enhancement raised an error, show advice on trying again if installer_err: steps.append( ""The certificate was saved, but could not be installed (installer: "" f""{config.installer}). After fixing the error shown below, try installing it again "" f""by running:\n {cli.cli_command} install --cert-name "" f""{_cert_name_from_config_or_lineage(config, lineage)}"" ) # If a certificate was obtained or renewed, show applicable renewal advice if new_or_renewed_cert: if config.csr: steps.append( ""Certificates created using --csr will not be renewed automatically by Certbot. "" ""You will need to renew the certificate before it expires, by running the same "" ""Certbot command again."") elif _is_interactive_only_auth(config): steps.append( ""This certificate will not be renewed automatically by Certbot. The --manual "" ""plugin requires the use of an authentication hook script (--manual-auth-hook) "" ""in order to support autorenewal. To renew this certificate, repeat this same "" f""{cli.cli_command} command, before the certificate's expiry date."" ) elif not config.preconfigured_renewal: steps.append( ""The certificate will need to be renewed before it expires. Certbot can "" ""automatically renew the certificate in the background, but you may need "" ""to take steps to enable that functionality. "" ""See https://certbot.org/renewal-setup for instructions."") if not steps: return # TODO: refactor ANSI escapes during https://github.com/certbot/certbot/issues/8848 (bold_on, bold_off) = [c if sys.stdout.isatty() and not config.quiet else '' \ for c in (util.ANSI_SGR_BOLD, util.ANSI_SGR_RESET)] print(bold_on, '\n', 'NEXT STEPS:', bold_off, sep='') for step in steps: display_util.notify(f""- {step}"") # If there was an installer error, segregate the error output with a trailing newline if installer_err: print() ","def _report_next_steps(config: interfaces.IConfig, installer_err: Optional[errors.Error], lineage: Optional[storage.RenewableCert], new_or_renewed_cert: bool = True) -> None: """"""Displays post-run/certonly advice to the user about renewal and installation. The output varies by runtime configuration and any errors encountered during installation. :param config: Configuration object :type config: interfaces.IConfig :param installer_err: The installer/enhancement error encountered, if any. :type error: Optional[errors.Error] :param lineage: The resulting certificate lineage from the issuance, if any. :type lineage: Optional[storage.RenewableCert] :param bool new_or_renewed_cert: Whether the verb execution resulted in a certificate being saved (created or renewed). """""" steps: List[str] = [] # If the installation or enhancement raised an error, show advice on trying again if installer_err: steps.append( ""The certificate was saved, but could not be installed (installer: "" f""{config.installer}). After fixing the error shown below, try installing it again "" f""by running:\n {cli.cli_command} install --cert-name "" f""{_cert_name_from_config_or_lineage(config, lineage)}"" ) # If a certificate was obtained or renewed, show applicable renewal advice if new_or_renewed_cert: if config.csr: steps.append( ""Certificates created using --csr will not be renewed automatically by Certbot. 
"" ""You will need to renew the certificate before it expires, by running the same "" ""Certbot command again."") elif _is_interactive_only_auth(config): steps.append( ""This certificate will not be renewed automatically by Certbot. The --manual "" ""plugin requires the use of an authentication hook script (--manual-auth-hook) "" ""in order to support autorenewal. To renew this certificate, repeat this same "" f""{cli.cli_command} command before the certificate's expiry date."" ) elif not config.preconfigured_renewal: steps.append( ""The certificate will need to be renewed before it expires. Certbot can "" ""automatically renew the certificate in the background, but you may need "" ""to take steps to enable that functionality. "" ""See https://certbot.org/renewal-setup for instructions."") if not steps: return # TODO: refactor ANSI escapes during https://github.com/certbot/certbot/issues/8848 (bold_on, bold_off) = [c if sys.stdout.isatty() and not config.quiet else '' \ for c in (util.ANSI_SGR_BOLD, util.ANSI_SGR_RESET)] print(bold_on, '\n', 'NEXT STEPS:', bold_off, sep='') for step in steps: display_util.notify(f""- {step}"") # If there was an installer error, segregate the error output with a trailing newline if installer_err: print() " 504,"def get_new_sso_user_project_name_from_session(request): """""" This gets the project name from sso user data stored in the session. :param request: HttpRequest :return: String (project name) or None """""" return request.session.get('ssoNewUserData', {}).get( 'project_name' ) ","def get_new_sso_user_project_name_from_session(request): """""" This gets the project name from sso user data stored in the session. :param request: HttpRequest :return: String (project name) or None """""" return request.session.get('ssoNewUserData', {}).get('project_name') " 8810,"def test_isupport_getitem_ci(): """"""Test access to parameters is case insensitive."""""" instance = isupport.ISupport(awaylen=50) assert 'AWAYLEN' in instance assert 'awaylen' in instance assert instance['AWAYLEN'] == 50 assert instance['awaylen'] == 50 ","def test_isupport_getitem_case_insensitive(): """"""Test access to parameters is case insensitive."""""" instance = isupport.ISupport(awaylen=50) assert 'AWAYLEN' in instance assert 'awaylen' in instance assert instance['AWAYLEN'] == 50 assert instance['awaylen'] == 50 " 54501,"def _calculate_griddata( trials: List[FrozenTrial], x_param: str, x_indices: List[Union[str, int, float]], y_param: str, y_indices: List[Union[str, int, float]], contour_point_num: int, target: Optional[Callable[[FrozenTrial], float]], ) -> Tuple[ np.ndarray, np.ndarray, np.ndarray, List[Union[int, float]], List[Union[int, float]], List[Union[int, float]], List[Union[int, float]], List[int], List[str], List[int], List[str], int, int, ]: # Extract values for x, y, z axes from each trail. 
x_values = [] y_values = [] z_values = [] x_range_values = [] y_range_values = [] for trial in trials: contains_x_parm = x_param in trial.params if contains_x_parm: x_range_values.append(trial.params[x_param]) contains_y_param = y_param in trial.params if contains_y_param: y_range_values.append(trial.params[y_param]) if not contains_x_parm or not contains_y_param: continue x_values.append(trial.params[x_param]) y_values.append(trial.params[y_param]) if target is None: value = trial.value else: value = target(trial) if isinstance(value, int): value = float(value) elif not isinstance(value, float): raise ValueError( ""Trial{} has COMPLETE state, but its target value is non-numeric."".format( trial.number ) ) z_values.append(value) # Return empty values when x or y has no value. if len(x_values) == 0 or len(y_values) == 0: return ( np.array([]), np.array([]), np.array([]), x_values, y_values, [], [], [], [], [], [], 0, 0, ) # Add dummy values for grid data calculation when a parameter has one unique value. x_values_dummy = [] y_values_dummy = [] if len(set(x_values)) == 1: x_values_dummy = [x for x in x_indices if x not in x_values] x_values = x_values + x_values_dummy * len(x_values) y_values = y_values + (y_values * len(x_values_dummy)) z_values = z_values + (z_values * len(x_values_dummy)) if len(set(y_values)) == 1: y_values_dummy = [y for y in y_indices if y not in y_values] y_values = y_values + y_values_dummy * len(y_values) x_values = x_values + (x_values * len(y_values_dummy)) z_values = z_values + (z_values * len(y_values_dummy)) # Convert categorical values to int. cat_param_labels_x = [] # type: List[str] cat_param_pos_x = [] # type: List[int] cat_param_labels_y = [] # type: List[str] cat_param_pos_y = [] # type: List[int] if not _is_numerical(trials, x_param): enc = _LabelEncoder() x_range_values = enc.fit_transform(list(map(str, x_range_values))) x_values = enc.transform(list(map(str, x_values))) cat_param_labels_x = enc.get_labels() cat_param_pos_x = enc.get_indices() if not _is_numerical(trials, y_param): enc = _LabelEncoder() y_range_values = enc.fit_transform(list(map(str, y_range_values))) y_values = enc.transform(list(map(str, y_values))) cat_param_labels_y = enc.get_labels() cat_param_pos_y = enc.get_indices() # Calculate min and max of x and y. x_values_min = min(x_range_values) x_values_max = max(x_range_values) y_values_min = min(y_range_values) y_values_max = max(y_range_values) # Calculate grid data points. # For x and y, create 1-D array of evenly spaced coordinates on linear or log scale. 
xi = np.array([]) yi = np.array([]) zi = np.array([]) if _is_log_scale(trials, x_param): padding_x = (np.log10(x_values_max) - np.log10(x_values_min)) * AXES_PADDING_RATIO x_values_min = np.power(10, np.log10(x_values_min) - padding_x) x_values_max = np.power(10, np.log10(x_values_max) + padding_x) xi = np.logspace(np.log10(x_values_min), np.log10(x_values_max), contour_point_num) else: padding_x = (x_values_max - x_values_min) * AXES_PADDING_RATIO x_values_min -= padding_x x_values_max += padding_x xi = np.linspace(x_values_min, x_values_max, contour_point_num) if _is_log_scale(trials, y_param): padding_y = (np.log10(y_values_max) - np.log10(y_values_min)) * AXES_PADDING_RATIO y_values_min = np.power(10, np.log10(y_values_min) - padding_y) y_values_max = np.power(10, np.log10(y_values_max) + padding_y) yi = np.logspace(np.log10(y_values_min), np.log10(y_values_max), contour_point_num) else: padding_y = (y_values_max - y_values_min) * AXES_PADDING_RATIO y_values_min -= padding_y y_values_max += padding_y yi = np.linspace(y_values_min, y_values_max, contour_point_num) # create irregularly spaced map of trial values # and interpolate it with Plotly's interpolation formulation if x_param != y_param: zmap = _create_zmap(x_values, y_values, z_values, xi, yi) zi = _interpolate_zmap(zmap, contour_point_num) return ( xi, yi, zi, x_values, y_values, [x_values_min, x_values_max], [y_values_min, y_values_max], cat_param_pos_x, cat_param_labels_x, cat_param_pos_y, cat_param_labels_y, len(x_values_dummy), len(y_values_dummy), ) ","def _calculate_griddata( trials: List[FrozenTrial], x_param: str, x_indices: List[Union[str, int, float]], y_param: str, y_indices: List[Union[str, int, float]], contour_point_num: int, target: Optional[Callable[[FrozenTrial], float]], ) -> Tuple[ np.ndarray, np.ndarray, np.ndarray, List[Union[int, float]], List[Union[int, float]], List[Union[int, float]], List[Union[int, float]], List[int], List[str], List[int], List[str], int, int, ]: # Extract values for x, y, z axes from each trail. x_values = [] y_values = [] z_values = [] x_range_values = [] y_range_values = [] for trial in trials: contains_x_param = x_param in trial.params if contains_x_param: x_range_values.append(trial.params[x_param]) contains_y_param = y_param in trial.params if contains_y_param: y_range_values.append(trial.params[y_param]) if not contains_x_param or not contains_y_param: continue x_values.append(trial.params[x_param]) y_values.append(trial.params[y_param]) if target is None: value = trial.value else: value = target(trial) if isinstance(value, int): value = float(value) elif not isinstance(value, float): raise ValueError( ""Trial{} has COMPLETE state, but its target value is non-numeric."".format( trial.number ) ) z_values.append(value) # Return empty values when x or y has no value. if len(x_values) == 0 or len(y_values) == 0: return ( np.array([]), np.array([]), np.array([]), x_values, y_values, [], [], [], [], [], [], 0, 0, ) # Add dummy values for grid data calculation when a parameter has one unique value. 
x_values_dummy = [] y_values_dummy = [] if len(set(x_values)) == 1: x_values_dummy = [x for x in x_indices if x not in x_values] x_values = x_values + x_values_dummy * len(x_values) y_values = y_values + (y_values * len(x_values_dummy)) z_values = z_values + (z_values * len(x_values_dummy)) if len(set(y_values)) == 1: y_values_dummy = [y for y in y_indices if y not in y_values] y_values = y_values + y_values_dummy * len(y_values) x_values = x_values + (x_values * len(y_values_dummy)) z_values = z_values + (z_values * len(y_values_dummy)) # Convert categorical values to int. cat_param_labels_x = [] # type: List[str] cat_param_pos_x = [] # type: List[int] cat_param_labels_y = [] # type: List[str] cat_param_pos_y = [] # type: List[int] if not _is_numerical(trials, x_param): enc = _LabelEncoder() x_range_values = enc.fit_transform(list(map(str, x_range_values))) x_values = enc.transform(list(map(str, x_values))) cat_param_labels_x = enc.get_labels() cat_param_pos_x = enc.get_indices() if not _is_numerical(trials, y_param): enc = _LabelEncoder() y_range_values = enc.fit_transform(list(map(str, y_range_values))) y_values = enc.transform(list(map(str, y_values))) cat_param_labels_y = enc.get_labels() cat_param_pos_y = enc.get_indices() # Calculate min and max of x and y. x_values_min = min(x_range_values) x_values_max = max(x_range_values) y_values_min = min(y_range_values) y_values_max = max(y_range_values) # Calculate grid data points. # For x and y, create 1-D array of evenly spaced coordinates on linear or log scale. xi = np.array([]) yi = np.array([]) zi = np.array([]) if _is_log_scale(trials, x_param): padding_x = (np.log10(x_values_max) - np.log10(x_values_min)) * AXES_PADDING_RATIO x_values_min = np.power(10, np.log10(x_values_min) - padding_x) x_values_max = np.power(10, np.log10(x_values_max) + padding_x) xi = np.logspace(np.log10(x_values_min), np.log10(x_values_max), contour_point_num) else: padding_x = (x_values_max - x_values_min) * AXES_PADDING_RATIO x_values_min -= padding_x x_values_max += padding_x xi = np.linspace(x_values_min, x_values_max, contour_point_num) if _is_log_scale(trials, y_param): padding_y = (np.log10(y_values_max) - np.log10(y_values_min)) * AXES_PADDING_RATIO y_values_min = np.power(10, np.log10(y_values_min) - padding_y) y_values_max = np.power(10, np.log10(y_values_max) + padding_y) yi = np.logspace(np.log10(y_values_min), np.log10(y_values_max), contour_point_num) else: padding_y = (y_values_max - y_values_min) * AXES_PADDING_RATIO y_values_min -= padding_y y_values_max += padding_y yi = np.linspace(y_values_min, y_values_max, contour_point_num) # create irregularly spaced map of trial values # and interpolate it with Plotly's interpolation formulation if x_param != y_param: zmap = _create_zmap(x_values, y_values, z_values, xi, yi) zi = _interpolate_zmap(zmap, contour_point_num) return ( xi, yi, zi, x_values, y_values, [x_values_min, x_values_max], [y_values_min, y_values_max], cat_param_pos_x, cat_param_labels_x, cat_param_pos_y, cat_param_labels_y, len(x_values_dummy), len(y_values_dummy), ) " 34496,"def convert_nlu(training_data_path: Path, output_path: Path, source_path: Path): reader = MarkdownReader() writer = RasaYAMLWriter() training_data = reader.read(training_data_path) writer.dump(output_path, training_data) print_success(f""Converted NLU file: '{source_path}' >> '{output_path}'"") ","def convert_nlu(training_data_path: Path, output_path: Path, source_path: Path): reader = MarkdownReader() writer = RasaYAMLWriter() training_data = 
reader.read(training_data_path) writer.dump(output_path, training_data) print_success(f""Converted NLU file: '{source_path}' >> '{output_path}'."") " 44479,"def get_snap_version() -> str: return environ[""SNAP_VERSION""] ","def get_snap_version() -> str: return os.getenv(""SNAP_VERSION"", ""(unknown)"") " 116,"def _get_amazon_metadata( id_: str, id_type: str = 'isbn', resources=None, retries: int = 3, sleep_sec: float = 0.1, ) -> dict | None: """"""Uses the Amazon Product Advertising API ItemLookup operation to locatate a specific book by identifier; either 'isbn' or 'asin'. https://docs.aws.amazon.com/AWSECommerceService/latest/DG/ItemLookup.html :param str id_: The item id: isbn (10/13), or Amazon ASIN. :param str id_type: 'isbn' or 'asin'. :param ??? resources: ??? :param int retries: Number of times to query affiliate server before returning None :param float sleep_sec: Delay time.sleep(sleep_sec) seconds before each retry :return: A single book item's metadata, or None. :rtype: dict or None """""" if not affiliate_server_url: return None if id_type == 'isbn': id_ = normalize_isbn(id_) if len(id_) == 13 and id_.startswith('978'): id_ = isbn_13_to_isbn_10(id_) try: r = requests.get(f'http://{affiliate_server_url}/isbn/{id_}') r.raise_for_status() if hit := r.json().get('hit'): return hit if retries <= 1: return None time.sleep(sleep_sec) # sleep before recursive call return _get_amazon_metadata(id_, id_type, resources, retries - 1, sleep_sec) except requests.exceptions.ConnectionError: logger.exception(""Affiliate Server unreachable"") except requests.exceptions.HTTPError: logger.exception(f""Affiliate Server: id {id_} not found"") return None ","def _get_amazon_metadata( id_: str, id_type: str = 'isbn', resources=None, retries: int = 3, sleep_sec: float = 0.1, ) -> dict | None: """"""Uses the Amazon Product Advertising API ItemLookup operation to locatate a specific book by identifier; either 'isbn' or 'asin'. https://docs.aws.amazon.com/AWSECommerceService/latest/DG/ItemLookup.html :param str id_: The item id: isbn (10/13), or Amazon ASIN. :param str id_type: 'isbn' or 'asin'. :param resources: Used for AWSE Commerce Service lookup -- See Amazon docs :param int retries: Number of times to query affiliate server before returning None :param float sleep_sec: Delay time.sleep(sleep_sec) seconds before each retry :return: A single book item's metadata, or None. 
:rtype: dict or None """""" if not affiliate_server_url: return None if id_type == 'isbn': id_ = normalize_isbn(id_) if len(id_) == 13 and id_.startswith('978'): id_ = isbn_13_to_isbn_10(id_) try: r = requests.get(f'http://{affiliate_server_url}/isbn/{id_}') r.raise_for_status() if hit := r.json().get('hit'): return hit if retries <= 1: return None time.sleep(sleep_sec) # sleep before recursive call return _get_amazon_metadata(id_, id_type, resources, retries - 1, sleep_sec) except requests.exceptions.ConnectionError: logger.exception(""Affiliate Server unreachable"") except requests.exceptions.HTTPError: logger.exception(f""Affiliate Server: id {id_} not found"") return None " 49433,"def can_use_network(): """""" Return True if network access is allowed """""" try: import datalad HAVE_DATALAD = True except: HAVE_DATALAD = False if not HAVE_DATALAD: return False if os.environ.get('NOSETESTS_NO_NETWORK', False): return False if os.environ.get('TRAVIS') == 'true': return False return True ","def can_use_network(): """""" Return True if network access is allowed """""" try: import datalad HAVE_DATALAD = True except: HAVE_DATALAD = False if not HAVE_DATALAD: return False if os.environ.get('NOSETESTS_NO_NETWORK', False): return False if os.environ.get('TRAVIS') == 'true': return False return True " 12897,"def patch_pagination_args(field: DjangoConnectionField): """"""Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. """""" field.args[""first""].description = ""Returns the first n elements from the list."" field.args[""last""].description = ""Returns the last n elements from the list."" field.args[ ""before"" ].description = ( ""Returns the elements in the list that come before the specified cursor."" ) field.args[ ""after"" ].description = ( ""Returns the elements in the list that come after the specified cursor."" ) ","def patch_pagination_args(field: DjangoConnectionField): """"""Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. """""" field.args[""first""].description = ""Returns the first n elements from the list."" field.args[""last""].description = ""Return the last n elements from the list."" field.args[ ""before"" ].description = ( ""Returns the elements in the list that come before the specified cursor."" ) field.args[ ""after"" ].description = ( ""Returns the elements in the list that come after the specified cursor."" ) " 33534,"def create_zip_file_cli(source_path, base_dir, zip_file): # Using the native zip command can be an order of magnitude faster on Travis-CI source = ""*"" if source_path == base_dir else os.path.basename(source_path) command = ""cd %s; zip -r %s %s"" % (base_dir, zip_file, source) run(command) ","def create_zip_file_cli(source_path, base_dir, zip_file): # Using the native zip command can be an order of magnitude faster on Travis-CI source = ""*"" if source_path == base_dir else os.path.basename(source_path) run([""zip"", ""-r"", zip_file, source], cwd=base_dir) " 30751,"def get_remediation_data_command(client: Client, args: dict, no_output_mode: bool) -> List[Dict[str, Any]]: """"""Get SafeBreach remediation data. 
Arguments: client {Client} -- Client derives from BaseClient args {dict} -- function arguments no_output_mode {bool} -- if true, this function will insert data to the context, otherwise, it will just returns the data. Keyword Arguments: Returns: Dict -- Each key is a unique SafeBreach data type. Each value is a list of the data. """""" insight_id: Optional[int] = args.get('insightId') response = client.get_remediation_data(insight_id) insight: Any = get_insights_command(client, {'insightIds': [insight_id]}, False) if insight: insight = insight[0] if response.status_code < 200 or response.status_code >= 300: raise DemistoException(f'Failed to fetch remediation data for insight id {insight_id}') sb_remediation_data = response.json().get('remediationData') processed_data = extract_data(sb_remediation_data) readable_output_list = generate_readable_output(processed_data) vendor_remediation_data = list(filter(lambda o: o['value'], [{'type': ""Splunk"", ""value"": get_splunk_remedation_query(response)}])) # Demisto Context: dbot_score_list = [] standard_context_dict = {} secondary_standard_context_dict: Any = {} secondary_standard_context_list = [] secondary_path = '' # SafeBreach Context: safebreach_context_list = [] safebreach_context = { ""Id"": insight_id, 'RawRemediationData': processed_data, 'VendorRemediationData': vendor_remediation_data } safebreach_context_list.append(safebreach_context) for item in processed_data: if item['type'].startswith('Attack') or len(processed_data) == 0: continue standard_context_list: Any = [] demisto_standard_path = get_demisto_context_path(item['type']) # e.g URL(val.Data == obj.Data) demisto_data_type = SAFEBREACH_TO_DEMISTO_MAPPER.get(item['type']) # SHA256,Port,Protocol,Data,Command,URI if item['type'] in ['DropPaths', 'URIs', 'URI', 'Command']: item[""value""] = item[""value""].encode('utf-8').decode('unicode_escape').encode('latin1').decode('utf-8') if demisto_data_type: dbot_score = { ""Indicator"": item[""value""], 'type': get_dbot_type(item['type'], item[""value""]), # TODO: maybe change it to SB_Indicator? 
""Vendor"": ""SafeBreach"", ""Score"": 3 # TODO: Change to is behaviroal set to defaults } primary_standard_context = { demisto_data_type: item[""value""], # e.g Data : , SHA256: ""Malicious"": { ""Description"": f""SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}"", ""Vendor"": ""SafeBreach"" } } if item['type'] in ['FQDNs/IPs', 'FQDN/IP']: if re.match(IP_REGEX, item[""value""]): secondary_path = 'IP(val.Address == obj.Address)' secondary_standard_context_dict = { 'IP': item[""value""], ""Malicious"": { ""Description"": f""SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}"", ""Vendor"": ""SafeBreach"" } } else: secondary_path = 'Domain(val.Name == obj.Name)' secondary_standard_context_dict = { 'Name': item[""value""], ""Malicious"": { ""Description"": f""SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}"", ""Vendor"": ""SafeBreach"" } } if demisto_standard_path: standard_context_list.append(primary_standard_context) secondary_standard_context_list.append(secondary_standard_context_dict) dbot_score_list.append(dbot_score) if len(standard_context_list) > 0 and demisto_standard_path: standard_context_dict[demisto_standard_path] = standard_context_list if secondary_path: standard_context_dict[secondary_path] = secondary_standard_context_list output_context = { ""DBotScore(val.Indicator == obj.Indicator)"": dbot_score_list, ""SafeBreach.Insight(val.Id == obj.Id)"": safebreach_context_list, } merged_context = {**output_context, **standard_context_dict} readable_output = tableToMarkdown(name=""Remediation Data"", t=readable_output_list, removeNull=True) if no_output_mode: return_outputs(readable_output=readable_output, outputs=merged_context) return processed_data ","def get_remediation_data_command(client: Client, args: dict, no_output_mode: bool) -> List[Dict[str, Any]]: """"""Get SafeBreach remediation data. Arguments: client {Client} -- Client derives from BaseClient args {dict} -- function arguments no_output_mode {bool} -- if true, this function will insert data to the context, otherwise, it will just returns the data. Keyword Arguments: Returns: Dict -- Each key is a unique SafeBreach data type. Each value is a list of the data. 
"""""" insight_id: Optional[int] = args.get('insightId') response = client.get_remediation_data(insight_id) insight: Any = get_insights_command(client, {'insightIds': [insight_id]}, False) if insight: insight = insight[0] if response.status_code < 200 or response.status_code >= 300: raise DemistoException(f'Failed to fetch remediation data for insight id {insight_id}') sb_remediation_data = response.json().get('remediationData') processed_data = extract_data(sb_remediation_data) readable_output_list = generate_readable_output(processed_data) vendor_remediation_data = list(filter(lambda o: o['value'], [{'type': ""Splunk"", ""value"": get_splunk_remedation_query(response)}])) # Demisto Context: dbot_score_list = [] standard_context_dict = {} secondary_standard_context_dict: Any = {} secondary_standard_context_list = [] secondary_path = '' # SafeBreach Context: safebreach_context_list = [] safebreach_context = { ""Id"": insight_id, 'RawRemediationData': processed_data, 'VendorRemediationData': vendor_remediation_data } safebreach_context_list.append(safebreach_context) for item in processed_data: if item.get('type', '').startswith('Attack') or len(processed_data) == 0: continue standard_context_list: Any = [] demisto_standard_path = get_demisto_context_path(item['type']) # e.g URL(val.Data == obj.Data) demisto_data_type = SAFEBREACH_TO_DEMISTO_MAPPER.get(item['type']) # SHA256,Port,Protocol,Data,Command,URI if item['type'] in ['DropPaths', 'URIs', 'URI', 'Command']: item[""value""] = item[""value""].encode('utf-8').decode('unicode_escape').encode('latin1').decode('utf-8') if demisto_data_type: dbot_score = { ""Indicator"": item[""value""], 'type': get_dbot_type(item['type'], item[""value""]), # TODO: maybe change it to SB_Indicator? ""Vendor"": ""SafeBreach"", ""Score"": 3 # TODO: Change to is behaviroal set to defaults } primary_standard_context = { demisto_data_type: item[""value""], # e.g Data : , SHA256: ""Malicious"": { ""Description"": f""SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}"", ""Vendor"": ""SafeBreach"" } } if item['type'] in ['FQDNs/IPs', 'FQDN/IP']: if re.match(IP_REGEX, item[""value""]): secondary_path = 'IP(val.Address == obj.Address)' secondary_standard_context_dict = { 'IP': item[""value""], ""Malicious"": { ""Description"": f""SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}"", ""Vendor"": ""SafeBreach"" } } else: secondary_path = 'Domain(val.Name == obj.Name)' secondary_standard_context_dict = { 'Name': item[""value""], ""Malicious"": { ""Description"": f""SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}"", ""Vendor"": ""SafeBreach"" } } if demisto_standard_path: standard_context_list.append(primary_standard_context) secondary_standard_context_list.append(secondary_standard_context_dict) dbot_score_list.append(dbot_score) if len(standard_context_list) > 0 and demisto_standard_path: standard_context_dict[demisto_standard_path] = standard_context_list if secondary_path: standard_context_dict[secondary_path] = secondary_standard_context_list output_context = { ""DBotScore(val.Indicator == obj.Indicator)"": dbot_score_list, ""SafeBreach.Insight(val.Id == obj.Id)"": safebreach_context_list, } merged_context = {**output_context, **standard_context_dict} readable_output = tableToMarkdown(name=""Remediation Data"", t=readable_output_list, removeNull=True) if no_output_mode: return_outputs(readable_output=readable_output, outputs=merged_context) return processed_data " 683,"def process_uri_list(uri_list, 
settings): """""" Receives a list of URIs (most likely from the 'crawl' or 'runspider' commands) and returns a dictionary of {URI: file format} """""" valid_output_formats = without_none_values( settings.getwithbase('FEED_EXPORTERS') ).keys() out = {} for element in uri_list: try: feed_uri, feed_format = element.rsplit(':', 1) except ValueError: feed_uri = element feed_format = os.path.splitext(element)[1].replace(""."", """") else: if feed_uri == '-': feed_uri = 'stdout:' if feed_format not in valid_output_formats: raise UsageError(""Unrecognized output format '%s', set one after a"" "" semicolon (i.e. -o :) or as a file"" "" extension, from the supported list %s"" % (feed_format, tuple(valid_output_formats))) out[feed_uri] = feed_format return out ","def process_uri_list(uri_list, settings): """""" Receives a list of URIs (most likely from the 'crawl' or 'runspider' commands) and returns a dictionary of {URI: file format} """""" valid_output_formats = without_none_values( settings.getwithbase('FEED_EXPORTERS') ).keys() out = {} for element in uri_list: try: feed_uri, feed_format = element.rsplit(':', 1) except ValueError: feed_uri = element feed_format = os.path.splitext(element)[1].replace(""."", """") else: if feed_uri == '-': feed_uri = 'stdout:' if feed_format not in valid_output_formats: raise UsageError(""Unrecognized output format '%s', set one after a"" "" colon (i.e. -o :) or as a file"" "" extension, from the supported list %s"" % (feed_format, tuple(valid_output_formats))) out[feed_uri] = feed_format return out " 57390,"def _enumerate_service_methods(client: Any) -> Iterator[str]: """"""Return an iterable of service methods from a generated Iface class."""""" ifaces_found = 0 for base_cls in inspect.getmro(client): if base_cls.__name__ == ""Iface"": for name, _ in inspect.getmembers(base_cls, inspect.isroutine): yield name ifaces_found += 1 assert ifaces_found > 0, ""class is not a thrift client; it has no Iface"" ","def _enumerate_service_methods(client: Any) -> Iterator[str]: """"""Return an iterable of service methods from a generated Iface class."""""" ifaces_found = 0 for base_cls in inspect.getmro(client): if base_cls.__name__ == ""Iface"": for name, _ in inspect.getmembers(base_cls, inspect.isroutine): yield name ifaces_found += 1 assert ifaces_found > 0, ""class is not a thrift client; it has no Iface"" " 41403,"def k8s_versions_with_all_features(): file_path = str(Path(__file__).parent) + ""/enable_all_features.yaml"" configs = [] with open(file_path) as f: feature_data = yaml.load(f, Loader=SafeLoader) for k8s_version in supported_k8s_versions: configs.append({""k8s_version"": k8s_version, ""values"": feature_data}) return configs ","def k8s_versions_with_all_features(): feature_data = yaml.safe_load((Path(__file__).parent / ""enable_all_features.yaml"").read_text()) for k8s_version in supported_k8s_versions: configs.append({""k8s_version"": k8s_version, ""values"": feature_data}) return configs " 2224,"def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. 
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) warning_message = ( ""Estimator fit failed. The score on this train-test partition "" ""for these parameters will be set to 0.000000."" ) with pytest.warns(FitFailedWarning, match=warning_message): gs.fit(X, y) n_candidates = len(gs.cv_results_['params']) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. def get_cand_scores(i): return np.array(list(gs.cv_results_['split%d_test_score' % s][i] for s in range(gs.n_splits_))) assert all((np.all(get_cand_scores(cand_i) == 0.0) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER)) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) warning_message = ( ""Estimator fit failed. The score on this train-test partition "" ""for these parameters will be set to nan."" ) with pytest.warns(FitFailedWarning, match=warning_message): gs.fit(X, y) n_candidates = len(gs.cv_results_['params']) assert all(np.all(np.isnan(get_cand_scores(cand_i))) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER) ranks = gs.cv_results_['rank_test_score'] # Check that succeeded estimators have lower ranks assert ranks[0] <= 2 and ranks[1] <= 2 # Check that failed estimator has the highest rank assert ranks[clf.FAILING_PARAMETER] == 3 assert gs.best_index_ != clf.FAILING_PARAMETER ","def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) warning_message = ( ""Estimator fit failed. The score on this train-test partition "" ""for these parameters will be set to 0.0.*."" ) with pytest.warns(FitFailedWarning, match=warning_message): gs.fit(X, y) n_candidates = len(gs.cv_results_['params']) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. def get_cand_scores(i): return np.array(list(gs.cv_results_['split%d_test_score' % s][i] for s in range(gs.n_splits_))) assert all((np.all(get_cand_scores(cand_i) == 0.0) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER)) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) warning_message = ( ""Estimator fit failed. 
The score on this train-test partition "" ""for these parameters will be set to nan."" ) with pytest.warns(FitFailedWarning, match=warning_message): gs.fit(X, y) n_candidates = len(gs.cv_results_['params']) assert all(np.all(np.isnan(get_cand_scores(cand_i))) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER) ranks = gs.cv_results_['rank_test_score'] # Check that succeeded estimators have lower ranks assert ranks[0] <= 2 and ranks[1] <= 2 # Check that failed estimator has the highest rank assert ranks[clf.FAILING_PARAMETER] == 3 assert gs.best_index_ != clf.FAILING_PARAMETER " 50782,"def _next_selected(items: Iterable[str], selected: Optional[str]) -> Optional[str]: """"""Return the next item in a item list starting at given value. If selected is missing in items, None is returned """""" try: cycle = itertools.cycle(items) current = next(cycle) starting = current while True: if current == selected: break current = next(cycle) if current == starting: return None return next(cycle) except StopIteration: return None ","def _next_selected(items: Iterable[str], selected: Optional[str]) -> Optional[str]: """"""Return the next item in a item list starting at given value. If selected is missing in items, None is returned """""" try: index = items.index(selected) except ValueError: return None next_item = 0 if index == len(items) - 1 else index + 1 return items[next_item] " 26002,"def load_arguments(self, _): # Model imports DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes = self.get_models('HyperVGenerationTypes') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help=""The name of the Virtual Machine. 
You can configure the default using `az configure --defaults vm=`"", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help=""Scale set name. You can configure the default using `az configure --defaults vmss=`"", id_part='name') extension_instance_name_type = CLIArgumentType(help=""Name of extension instance, which can be customized. Default: name of the extension."") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01') license_type = CLIArgumentType( help=""Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for "" ""Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, "" ""use 'Windows_Client'. For more information see the Azure Windows VM online docs."", arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE', 'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES_SAP', 'SLES_HPC', 'None', 'RHEL_ELS_6'])) # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes') if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. 
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks') if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default=""V1"")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type([""V1"", ""V2""], default=""V1"")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. 
Run ""az {0} grant-access --access-level Write"" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.') c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.') c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help=""The bandwidth allowed for this disk. Only settable for UltraSSD disks. 
MBps means millions of bytes per second with ISO notation of powers of 10"") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. 
Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's OS disk."") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's data disk."") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api=""2019-03-01"", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type, ) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = ""Name of the image builder run output."" c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help=""Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."" "" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'"") c.argument('source', options_list=[""--image-source"", ""-i""], help=""The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID."") c.argument('image_template_name', image_template_name_type, help=""The name of the image template."") c.argument('checksum', help=""The SHA256 checksum of the Red Hat ISO image"") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. 
E.g ""image_1=westus2 image_2=westus"". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g ""my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth."" ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a ""/"". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group=""Image Source"") ib_customizer_type = CLIArgumentType(arg_group=""Customizer"") ib_cutput_type = CLIArgumentType(arg_group=""Output"") c.argument('build_timeout', type=int, help=""The Maximum duration to wait while building the image template, in minutes. Default is 60."") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).') c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. 
Omit if no specific virtual network needs to be used.') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = ""Space-separated list of regions to replicate the image version into."" ib_img_location_help = ""Location where the customized image will be created."" c.argument('gallery_image_definition', arg_group=""Shared Image Gallery"", help=""Name or ID of the existing SIG image definition to create the customized image version with."") c.argument('gallery_name', arg_group=""Shared Image Gallery"", help=""Shared image gallery name, if image definition name and not ID was provided."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group=""Managed Image"", help=""Name or ID of the customized managed image to be created."") c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = ""Tags that will be applied to the output artifact once it has been created by the distributor. "" + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=[""--artifact-tags""]) ib_default_loc_help = "" Defaults to resource group's location."" c.argument('output_name', help=ib_output_name_help + "" Defaults to the name of the managed image or sig image definition."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group=""VHD"", help=""The output is a VHD distributor."", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group=""Windows Restart"") ib_win_update_type = CLIArgumentType(arg_group=""Windows Update"") ib_script_type = CLIArgumentType(arg_group=""Shell and Powershell"") ib_powershell_type = CLIArgumentType(arg_group=""Powershell"") ib_file_customizer_type = CLIArgumentType(arg_group=""File"") c.argument('customizer_name', help=""Name of the customizer."") c.argument('customizer_type', options_list=['--type', '-t'], help=""Type of customizer to be added to the image template."", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help=""URL of script to customize the image with. 
The URL must be publicly accessible."") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help=""Space-separated list of inline script lines to customize the image with."") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help=""Space-separated list of valid exit codes, as integers"") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help=""Command to execute the restart operation."") c.argument('restart_check_command', arg_type=ib_win_restart_type, help=""Command to verify that restart succeeded."") c.argument('restart_timeout', arg_type=ib_win_restart_type, help=""Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)"", default=""5m"") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help=""The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc."") c.argument('dest_path', arg_type=ib_file_customizer_type, help=""The absolute destination path where the file specified in --file-source will be downloaded to in the image"") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. 
If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help=""Managed OS disk ID or name to swap to"") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help=""enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2"") c.argument('disk_caching', nargs='*', help=""Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--size`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none (\'""""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine"") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. 
Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or ID of the dedicated host group that the VM will reside in. 
--host and --host-group can't be used together."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together."") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help=""The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range."") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. 
It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help=""Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only"") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help=""The name or ID of the managed disk"", id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('disks', nargs='*', help=""One or more names or IDs of the managed disk (space-delimited)."", completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm extension show') as c: c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query ""[?attributes.enabled].id"" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help=""image sku's version"") c.argument('urn', help=""URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted"") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. 
If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help=""Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd"") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help=""size name, partial name is accepted"") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help=""show skus supporting availability zones"") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help=""show all information including vm sizes not available under the current subscription"") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. ""availabilitySets"", ""snapshots"", ""disks"", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help=""Name of the Dedicated Host Group"") c.argument('host_name', name_arg_type, id_part='child_name_1', help=""Name of the Dedicated Host"") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help=""Fault domain of the host within a group. 
Allowed values: 0, 1, 2"") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help=""Replace the host automatically if a failure occurs"") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help=""The software license type that will be applied to the VMs deployed on the dedicated host."") c.argument('sku', help=""SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/"") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help=""Name of the Dedicated Host Group"") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=[""--platform-fault-domain-count"", ""-c""], type=int, help=""Number of fault domains that the host group can span."") c.argument('zones', zone_type) for scope in [""vm host"", ""vm host group""]: with self.argument_context(""{} create"".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = "" Otherwise, location will default to the resource group's location"" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings[""help""] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. 
Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help=""Limit the scale set to a single placement group."" "" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details."") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set"") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help=""Specify the Microsoft.Network API version used when creating networking resources in the Network "" ""Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. 
Default "" ""value is 2020-11-01."") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify """" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. 
Can also reference an existing load balancer by ID or specify """" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help=""Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'"") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help=""Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules"") c.argument('vm_domain_name', help=""domain name of VM instances, once configured, the FQDN is `vm..<..rest..>`"") c.argument('dns_servers', nargs='+', help=""space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6"") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group=""Protection Policy"", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help=""Protect the VM instance from scale-in operations."") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help=""Protect the VM instance from scale set actions (including scale-in)."") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--vm-sku`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help=""{0} VM instance with this ID. If missing, {0} VMSS."".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help=""The command id. Use 'az {} run-command list' to get the list"".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help=""space-separated parameters in the format of '[name=]value'"") c.argument('scripts', nargs='+', help=""Space-separated script lines. 
Use @{file} to load script from a file"") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine') for scope in ['create', 'update']: with self.argument_context('vm run-command {}'.format(scope)) as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.') c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') with self.argument_context('vm run-command delete') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vm run-command list') as c: c.argument('vm_name', run_cmd_vm_name, id_part=None) c.argument('expand', help='The expand expression to apply on the operation.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('vm run-command show') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') with self.argument_context('vm run-command wait') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') for scope in ['create', 'update']: with self.argument_context('vmss run-command {}'.format(scope)) as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.') c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.') with self.argument_context('vmss run-command delete') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vmss run-command list') as c: c.argument('vmss_name', run_cmd_vmss_name, id_part=None) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('expand', help='The expand expression to apply on the operation.') with self.argument_context('vmss run-command show') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help=""Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help=""Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm application set', 'vmss application set']: with self.argument_context(scope) as c: c.argument('vm', existing_vm_name) c.argument('vmss_name', vmss_name_type) c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help=""Space-separated application version ids to set to VM."") c.argument('order_applications', action='store_true', help='Whether set order index at each gallery applications, the order index starts from 1.') c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', help='Space-separated application configuration overrides for each application version ids. ' 'It should have the same number of items as the application version ids. Null is available for a application ' 'which does not have a configuration override.') for scope in ['vm application list', 'vmss application list']: with self.argument_context(scope) as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) c.argument('vmss_name', vmss_name_type, id_part=None) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. 
If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ ""sourceVault"": { ""id"": ""value"" }, ""vaultCertificates"": [{ ""certificateUrl"": ""value"", ""certificateStore"": ""cert store name (only on windows)""}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help=""accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples"") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) c.argument('accept_term', action='store_true', help=""Accept the license agreement and privacy statement."") with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help=""Password for the VM if authentication type is 'Password'."") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value ""/home/username/.ssh/authorized_keys"" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. ""all"" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = "", "".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = "", "".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is ""os"" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. 
{} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help=""Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created."") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help=""Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds"") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help=""storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `= =` to configure individual disk"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. 
This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion.') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify """" for None (\'""""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. Use a singular ' 'value to apply on all resources, or use = to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help=""Scope that the system assigned identity can access. 
"") c.ignore('identity_role_id') for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', help='Role name or id the system assigned identity will have. ') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], help=""Role name or id the system assigned identity will have"") with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. 
To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help=""the $orderby odata query option"") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('license_type', license_type) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in ""None"" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.') with self.argument_context('vm update') as c: c.argument('license_type', license_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. 
If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help=""This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'."") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. 
Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. ""IsSecureBootSupported=true IsMeasuredBootSupported=false""') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image definition') with self.argument_context('sig image-definition update') as 
c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration=""3.0.0"") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `..`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `..`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('storage_account_type', help=""The default storage account type to be used per region. To set regional storage account types, use --target-regions"", arg_type=get_enum_type([""Standard_LRS"", ""Standard_ZRS"", ""Premium_LRS""]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `,,,,`. Use ""null"" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. 
This property is not updatable.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-version show') as c: c.argument('expand', help=""The expand expression to apply on the operation, e.g. 'ReplicationStatus'"") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: ..', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `[=][=]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Gallery applications with self.argument_context('sig gallery-application') as c: c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], help='The name of the gallery Application') with self.argument_context('sig gallery-application create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' 'to specify the supported type of the OS that application is built for.
Possible values ' 'are: **Windows**
**Linux**') with self.argument_context('sig gallery-application update') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') with self.argument_context('sig gallery-application version') as c: c.argument('gallery_application_name', options_list=['--application-name'], help='The name of the gallery Application') c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], help='The name of the gallery Application Version') for scope in ['create', 'update']: with self.argument_context('sig gallery-application version {}'.format(scope)) as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') c.argument('install_command', help='The path and arguments to install the gallery application.') c.argument('remove_command', help='The path and arguments to remove the gallery application.') c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' ' then update operation will invoke remove command on the previous version ' 'and install command on the current version of the gallery application.') c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' 'going to be replicated to. This property is updatable. Expected value: ' 'json-string/json-file/@json-file.') c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' 'deployed from the latest version of the Image Definition won\'t use this Image Version.', arg_group='Publishing Profile') c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' 'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile') # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help=""The name of the proximity placement group."") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help=""The type of the proximity placement group. 
Allowed values: Standard."") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help=""The name or ID of the proximity placement group the {} should be associated with."".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help=""Query to execute over Log Analytics data."") c.argument('timespan', help=""Timespan over which to query. Defaults to querying all available data."") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. 
EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion # region Capacity with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in the same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs to be reserved. Currently VM Skus with the capability called ""CapacityReservationSupported"" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') # endregion # region Restore point with self.argument_context('restore-point') as c: c.argument('restore_point_collection_name', options_list=['--collection-name'], help='The name of the restore point collection.') with self.argument_context('restore-point create') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' 'customer wishes to exclude from the restore point. If no disks are specified, all disks will be ' 'included.') c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') with self.argument_context('restore-point show') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a restore point.') with self.argument_context('restore-point delete') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') with self.argument_context('restore-point wait') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') # endregion # region Restore point collection with self.argument_context('restore-point collection create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', arg_group='Source') with self.argument_context('restore-point collection update') as c: c.argument('tags', tags_type) with self.argument_context('restore-point collection show') as c: c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('restore_points', action='store_true', help='All contained restore points in the restorePointCollection.') ","def load_arguments(self, _): # Model imports DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes = 
self.get_models('HyperVGenerationTypes') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help=""The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=`"", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help=""Scale set name. You can configure the default using `az configure --defaults vmss=`"", id_part='name') extension_instance_name_type = CLIArgumentType(help=""Name of extension instance, which can be customized. Default: name of the extension."") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01') license_type = CLIArgumentType( help=""Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for "" ""Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, "" ""use 'Windows_Client'. For more information see the Azure Windows VM online docs."", arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE', 'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES_SAP', 'SLES_HPC', 'None', 'RHEL_ELS_6'])) # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes') if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. 
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks') if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default=""V1"")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type([""V1"", ""V2""], default=""V1"")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. Run ""az {0} grant-access --access-level Write"" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.') c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.') c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. 
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help=""The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10"") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g., P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. 
Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate whether the OS on a disk supports hibernation.', min_api='2020-12-01') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collapse all different image sources to under 2 common arguments --os-disk-source --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. 
Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's OS disk."") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's data disk."") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api=""2019-03-01"", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type, ) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = ""Name of the image builder run output."" c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help=""Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."" "" Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'"") c.argument('source', options_list=[""--image-source"", ""-i""], help=""The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID."") c.argument('image_template_name', image_template_name_type, help=""The name of the image template."") c.argument('checksum', help=""The SHA256 checksum of the Red Hat ISO image"") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g ""image_1=westus2 image_2=westus"". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g ""my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth."" ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a ""/"". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group=""Image Source"") ib_customizer_type = CLIArgumentType(arg_group=""Customizer"") ib_cutput_type = CLIArgumentType(arg_group=""Output"") c.argument('build_timeout', type=int, help=""The maximum duration to wait while building the image template, in minutes. Default is 60."") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. 
Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).') c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = ""Space-separated list of regions to replicate the image version into."" ib_img_location_help = ""Location where the customized image will be created."" c.argument('gallery_image_definition', arg_group=""Shared Image Gallery"", help=""Name or ID of the existing SIG image definition to create the customized image version with."") c.argument('gallery_name', arg_group=""Shared Image Gallery"", help=""Shared image gallery name, if image definition name and not ID was provided."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group=""Managed Image"", help=""Name or ID of the customized managed image to be created."") c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = ""Tags that will be applied to the output artifact once it has been created by the distributor. 
"" + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=[""--artifact-tags""]) ib_default_loc_help = "" Defaults to resource group's location."" c.argument('output_name', help=ib_output_name_help + "" Defaults to the name of the managed image or sig image definition."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group=""VHD"", help=""The output is a VHD distributor."", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group=""Windows Restart"") ib_win_update_type = CLIArgumentType(arg_group=""Windows Update"") ib_script_type = CLIArgumentType(arg_group=""Shell and Powershell"") ib_powershell_type = CLIArgumentType(arg_group=""Powershell"") ib_file_customizer_type = CLIArgumentType(arg_group=""File"") c.argument('customizer_name', help=""Name of the customizer."") c.argument('customizer_type', options_list=['--type', '-t'], help=""Type of customizer to be added to the image template."", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help=""URL of script to customize the image with. The URL must be publicly accessible."") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help=""Space-separated list of inline script lines to customize the image with."") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help=""Space-separated list of valid exit codes, as integers"") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help=""Command to execute the restart operation."") c.argument('restart_check_command', arg_type=ib_win_restart_type, help=""Command to verify that restart succeeded."") c.argument('restart_timeout', arg_type=ib_win_restart_type, help=""Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)"", default=""5m"") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help=""The URI of the file to be downloaded into the image. 
It can be a github link, SAS URI for Azure Storage, etc."") c.argument('dest_path', arg_type=ib_file_customizer_type, help=""The absolute destination path where the file specified in --file-source will be downloaded to in the image"") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help=""Managed OS disk ID or name to swap to"") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help=""enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2"") c.argument('disk_caching', nargs='*', help=""Use singular value to apply across, or specify individual disks, e.g. 
'os=ReadWrite 0=None 1=ReadOnly' to update the os disk and 2 data disks"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none (\'""""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine"") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. 
This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. 
Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together."") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help=""The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range."") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. 
It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help=""Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only"") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help=""The name or ID of the managed disk"", id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('disks', nargs='*', help=""One or more names or IDs of the managed disk (space-delimited)."", completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm extension show') as c: c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query ""[?attributes.enabled].id"" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help=""image sku's version"") c.argument('urn', help=""URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted"") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. 
If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help=""Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd"") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help=""size name, partial name is accepted"") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help=""show skus supporting availability zones"") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help=""show all information including vm sizes not available under the current subscription"") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. ""availabilitySets"", ""snapshots"", ""disks"", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help=""Name of the Dedicated Host Group"") c.argument('host_name', name_arg_type, id_part='child_name_1', help=""Name of the Dedicated Host"") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help=""Fault domain of the host within a group. 
Allowed values: 0, 1, 2"") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help=""Replace the host automatically if a failure occurs"") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help=""The software license type that will be applied to the VMs deployed on the dedicated host."") c.argument('sku', help=""SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/"") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help=""Name of the Dedicated Host Group"") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=[""--platform-fault-domain-count"", ""-c""], type=int, help=""Number of fault domains that the host group can span."") c.argument('zones', zone_type) for scope in [""vm host"", ""vm host group""]: with self.argument_context(""{} create"".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = "" Otherwise, location will default to the resource group's location"" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings[""help""] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. 
Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help=""Limit the scale set to a single placement group."" "" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details."") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set"") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help=""Specify the Microsoft.Network API version used when creating networking resources in the Network "" ""Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. 
Default "" ""value is 2020-11-01."") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify """" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. 
Can also reference an existing load balancer by ID or specify """" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help=""Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'"") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help=""Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules"") c.argument('vm_domain_name', help=""domain name of VM instances, once configured, the FQDN is `vm..<..rest..>`"") c.argument('dns_servers', nargs='+', help=""space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6"") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group=""Protection Policy"", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help=""Protect the VM instance from scale-in operations."") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help=""Protect the VM instance from scale set actions (including scale-in)."") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--vm-sku`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help=""{0} VM instance with this ID. If missing, {0} VMSS."".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help=""The command id. Use 'az {} run-command list' to get the list"".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help=""space-separated parameters in the format of '[name=]value'"") c.argument('scripts', nargs='+', help=""Space-separated script lines. 
Use @{file} to load script from a file"") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine') for scope in ['create', 'update']: with self.argument_context('vm run-command {}'.format(scope)) as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.') c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') with self.argument_context('vm run-command delete') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vm run-command list') as c: c.argument('vm_name', run_cmd_vm_name, id_part=None) c.argument('expand', help='The expand expression to apply on the operation.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('vm run-command show') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') with self.argument_context('vm run-command wait') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') for scope in ['create', 'update']: with self.argument_context('vmss run-command {}'.format(scope)) as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.') c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.') with self.argument_context('vmss run-command delete') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vmss run-command list') as c: c.argument('vmss_name', run_cmd_vmss_name, id_part=None) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('expand', help='The expand expression to apply on the operation.') with self.argument_context('vmss run-command show') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help=""Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help=""Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm application set', 'vmss application set']: with self.argument_context(scope) as c: c.argument('vm', existing_vm_name) c.argument('vmss_name', vmss_name_type) c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help=""Space-separated application version ids to set to VM."") c.argument('order_applications', action='store_true', help='Whether set order index at each gallery applications, the order index starts from 1.') c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', help='Space-separated application configuration overrides for each application version ids. ' 'It should have the same number of items as the application version ids. Null is available for a application ' 'which does not have a configuration override.') for scope in ['vm application list', 'vmss application list']: with self.argument_context(scope) as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) c.argument('vmss_name', vmss_name_type, id_part=None) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. 
If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ ""sourceVault"": { ""id"": ""value"" }, ""vaultCertificates"": [{ ""certificateUrl"": ""value"", ""certificateStore"": ""cert store name (only on windows)""}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help=""accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples"") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) c.argument('accept_term', action='store_true', help=""Accept the license agreement and privacy statement."") with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help=""Password for the VM if authentication type is 'Password'."") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value ""/home/username/.ssh/authorized_keys"" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. ""all"" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = "", "".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = "", "".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is ""os"" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. 
{} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help=""Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created."") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help=""Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds"") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help=""storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `= =` to configure individual disk"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. 
This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion.') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify """" for None (\'""""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. Use a singular ' 'value to apply on all resources, or use = to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help=""Scope that the system assigned identity can access. 
"") c.ignore('identity_role_id') for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', help='Role name or id the system assigned identity will have. ') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], help=""Role name or id the system assigned identity will have"") with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. 
To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help=""the $orderby odata query option"") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('license_type', license_type) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in ""None"" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.') with self.argument_context('vm update') as c: c.argument('license_type', license_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. 
If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help=""This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'."") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. 
Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. ""IsSecureBootSupported=true IsMeasuredBootSupported=false""') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image definition') with self.argument_context('sig image-definition update') as 
c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration=""3.0.0"") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `..`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `..`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('storage_account_type', help=""The default storage account type to be used per region. To set regional storage account types, use --target-regions"", arg_type=get_enum_type([""Standard_LRS"", ""Standard_ZRS"", ""Premium_LRS""]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `,,,,`. Use ""null"" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. 
This property is not updatable.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-version show') as c: c.argument('expand', help=""The expand expression to apply on the operation, e.g. 'ReplicationStatus'"") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: ..', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `[=][=]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Gallery applications with self.argument_context('sig gallery-application') as c: c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], help='The name of the gallery Application') with self.argument_context('sig gallery-application create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' 'to specify the supported type of the OS that application is built for.
Possible values ' 'are: **Windows**
**Linux**') with self.argument_context('sig gallery-application update') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') with self.argument_context('sig gallery-application version') as c: c.argument('gallery_application_name', options_list=['--application-name'], help='The name of the gallery Application') c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], help='The name of the gallery Application Version') for scope in ['create', 'update']: with self.argument_context('sig gallery-application version {}'.format(scope)) as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') c.argument('install_command', help='The path and arguments to install the gallery application.') c.argument('remove_command', help='The path and arguments to remove the gallery application.') c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' ' then update operation will invoke remove command on the previous version ' 'and install command on the current version of the gallery application.') c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' 'going to be replicated to. This property is updatable. Expected value: ' 'json-string/json-file/@json-file.') c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' 'deployed from the latest version of the Image Definition won\'t use this Image Version.', arg_group='Publishing Profile') c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' 'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile') # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help=""The name of the proximity placement group."") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help=""The type of the proximity placement group. 
Allowed values: Standard."") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help=""The name or ID of the proximity placement group the {} should be associated with."".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help=""Query to execute over Log Analytics data."") c.argument('timespan', help=""Timespan over which to query. Defaults to querying all available data."") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. 
EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion # region Capacity with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called ""CapacityReservationSupported"" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') # endRegion # region Restore point with self.argument_context('restore-point') as c: c.argument('restore_point_collection_name', options_list=['--collection-name'], help='The name of the restore point collection.') with self.argument_context('restore-point create') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' 'customer wishes to exclude from the restore point. If no disks are specified, all disks will be ' 'included.') c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') with self.argument_context('restore-point show') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a restore point.') with self.argument_context('restore-point delete') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') with self.argument_context('restore-point wait') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') # endRegion # region Restore point collection with self.argument_context('restore-point collection create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', arg_group='Source') with self.argument_context('restore-point collection update') as c: c.argument('tags', tags_type) with self.argument_context('restore-point collection show') as c: c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('restore_points', action='store_true', help='Show all contained restore points in the restorePointCollection.') " 31347,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 
'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 
'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'picus-email-peer-list': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 9616,"def main(): module = AnsibleModule( argument_spec=dict( type=dict(required=True, choices=['user', 'group', 'project']), name=dict(required=False, default=None), mountpoint=dict(required=True), bhard=dict(required=False, default=None), bsoft=dict(required=False, default=None), ihard=dict(required=False, default=None), isoft=dict(required=False, default=None), rtbhard=dict(required=False, default=None), rtbsoft=dict(required=False, default=None), state=dict(required=False, default='present', choices=['present', 'absent']) ), supports_check_mode=True ) quota_type = module.params['type'] name = module.params['name'] mountpoint = module.params['mountpoint'] bhard = module.params['bhard'] bsoft = module.params['bsoft'] ihard 
= module.params['ihard'] isoft = module.params['isoft'] rtbhard = module.params['rtbhard'] rtbsoft = module.params['rtbsoft'] state = module.params['state'] if bhard is not None: bhard = human_to_bytes(bhard) if bsoft is not None: bsoft = human_to_bytes(bsoft) if rtbhard is not None: rtbhard = human_to_bytes(rtbhard) if rtbsoft is not None: rtbsoft = human_to_bytes(rtbsoft) changed = False if os.getuid() != 0: module.fail_json(msg='You need to be root to run this module') if not os.path.ismount(mountpoint): module.fail_json(msg='%s is not a mountpoint' % mountpoint) mp = get_fs_by_mountpoint(mountpoint) if mp is None: module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' % mountpoint) if quota_type == 'user': type_arg = '-u' quota_default = 'root' if name is None: name = quota_default if 'uquota' not in mp['mntopts'] \ and 'usrquota' not in mp['mntopts'] \ and 'quota' not in mp['mntopts'] \ and 'uqnoenforce' not in mp['mntopts'] \ and 'qnoenforce' not in mp['mntopts']: module.fail_json( msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.' % mountpoint ) try: pwd.getpwnam(name) except KeyError as e: module.fail_json(msg='User %s doesn\'t exist.' % name) if quota_type == 'group': type_arg = '-g' quota_default = 'root' if name is None: name = quota_default if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']: module.fail_json( msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)' % (mountpoint, mp['mntopts']) ) try: grp.getgrnam(name) except KeyError as e: module.fail_json(msg='User %s doesn\'t exist.' % name) elif quota_type == 'project': type_arg = '-p' quota_default = '#0' if name is None: name = quota_default if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']: module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint) if name != quota_default and not os.path.isfile('/etc/projects'): module.fail_json(msg='/etc/projects doesn\'t exist.') if name != quota_default and not os.path.isfile('/etc/projid'): module.fail_json(msg='/etc/projid doesn\'t exist.') if name != quota_default and name is not None and get_project_id(name) is None: module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' 
% name) prj_set = True if name != quota_default: cmd = 'project %s' % name r = exec_quota(module, cmd, mountpoint) if r['rc'] != 0: module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r) else: for line in r['stdout']: if '%s - project identifier is not set' in line: prj_set = False break if not prj_set and not module.check_mode: cmd = 'project -s' r = exec_quota(module, cmd, mountpoint) if r['rc'] != 0: module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r) else: changed = True elif not prj_set and module.check_mode: changed = True changed = False # Set limits if state == 'absent': bhard = 0 bsoft = 0 ihard = 0 isoft = 0 rtbhard = 0 rtbsoft = 0 if bsoft is not None or bhard is not None: current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b') if isoft is not None or ihard is not None: current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i') if rtbsoft is not None or rtbhard is not None: current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb') limit = [] if bsoft is not None and int(bsoft / 1024) != current_bsoft: limit.append('bsoft=%s' % bsoft) if bhard is not None and int(bhard / 1024) != current_bhard: limit.append('bhard=%s' % bhard) if isoft is not None and isoft != current_isoft: limit.append('isoft=%s' % isoft) if ihard is not None and ihard != current_ihard: limit.append('ihard=%s' % ihard) if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft: limit.append('rtbsoft=%s' % rtbsoft) if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard: limit.append('rtbhard=%s' % rtbhard) if len(limit) > 0 and not module.check_mode: if name == quota_default: cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit)) else: cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name) r = exec_quota(module, cmd, mountpoint) if r['rc'] != 0: module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r) else: changed = True elif len(limit) > 0 and module.check_mode: changed = True module.exit_json(changed=changed) return True ","def main(): module = AnsibleModule( argument_spec=dict( type=dict(required=True, choices=['user', 'group', 'project']), name=dict(type='str'), mountpoint=dict(required=True), bhard=dict(required=False, default=None), bsoft=dict(required=False, default=None), ihard=dict(required=False, default=None), isoft=dict(required=False, default=None), rtbhard=dict(required=False, default=None), rtbsoft=dict(required=False, default=None), state=dict(required=False, default='present', choices=['present', 'absent']) ), supports_check_mode=True ) quota_type = module.params['type'] name = module.params['name'] mountpoint = module.params['mountpoint'] bhard = module.params['bhard'] bsoft = module.params['bsoft'] ihard = module.params['ihard'] isoft = module.params['isoft'] rtbhard = module.params['rtbhard'] rtbsoft = module.params['rtbsoft'] state = module.params['state'] if bhard is not None: bhard = human_to_bytes(bhard) if bsoft is not None: bsoft = human_to_bytes(bsoft) if rtbhard is not None: rtbhard = human_to_bytes(rtbhard) if rtbsoft is not None: rtbsoft = human_to_bytes(rtbsoft) changed = False if os.getuid() != 0: module.fail_json(msg='You need to be root to run this module') if not os.path.ismount(mountpoint): module.fail_json(msg='%s is not a mountpoint' % mountpoint) mp = get_fs_by_mountpoint(mountpoint) if mp is None: module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' 
% mountpoint) if quota_type == 'user': type_arg = '-u' quota_default = 'root' if name is None: name = quota_default if 'uquota' not in mp['mntopts'] \ and 'usrquota' not in mp['mntopts'] \ and 'quota' not in mp['mntopts'] \ and 'uqnoenforce' not in mp['mntopts'] \ and 'qnoenforce' not in mp['mntopts']: module.fail_json( msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.' % mountpoint ) try: pwd.getpwnam(name) except KeyError as e: module.fail_json(msg='User %s doesn\'t exist.' % name) if quota_type == 'group': type_arg = '-g' quota_default = 'root' if name is None: name = quota_default if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']: module.fail_json( msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)' % (mountpoint, mp['mntopts']) ) try: grp.getgrnam(name) except KeyError as e: module.fail_json(msg='User %s doesn\'t exist.' % name) elif quota_type == 'project': type_arg = '-p' quota_default = '#0' if name is None: name = quota_default if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']: module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint) if name != quota_default and not os.path.isfile('/etc/projects'): module.fail_json(msg='/etc/projects doesn\'t exist.') if name != quota_default and not os.path.isfile('/etc/projid'): module.fail_json(msg='/etc/projid doesn\'t exist.') if name != quota_default and name is not None and get_project_id(name) is None: module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' % name) prj_set = True if name != quota_default: cmd = 'project %s' % name r = exec_quota(module, cmd, mountpoint) if r['rc'] != 0: module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r) else: for line in r['stdout']: if '%s - project identifier is not set' in line: prj_set = False break if not prj_set and not module.check_mode: cmd = 'project -s' r = exec_quota(module, cmd, mountpoint) if r['rc'] != 0: module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r) else: changed = True elif not prj_set and module.check_mode: changed = True changed = False # Set limits if state == 'absent': bhard = 0 bsoft = 0 ihard = 0 isoft = 0 rtbhard = 0 rtbsoft = 0 if bsoft is not None or bhard is not None: current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b') if isoft is not None or ihard is not None: current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i') if rtbsoft is not None or rtbhard is not None: current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb') limit = [] if bsoft is not None and int(bsoft / 1024) != current_bsoft: limit.append('bsoft=%s' % bsoft) if bhard is not None and int(bhard / 1024) != current_bhard: limit.append('bhard=%s' % bhard) if isoft is not None and isoft != current_isoft: limit.append('isoft=%s' % isoft) if ihard is not None and ihard != current_ihard: limit.append('ihard=%s' % ihard) if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft: limit.append('rtbsoft=%s' % rtbsoft) if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard: limit.append('rtbhard=%s' % rtbhard) if len(limit) > 0 and not module.check_mode: if name == quota_default: cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit)) else: cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name) r = exec_quota(module, 
cmd, mountpoint) if r['rc'] != 0: module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r) else: changed = True elif len(limit) > 0 and module.check_mode: changed = True module.exit_json(changed=changed) return True " 2707,"def recall_score( y_true, y_pred, *, labels=None, pos_label=1, average=""binary"", sample_weight=None, zero_division=""warn"", ): """"""Compute the recall. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide `. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array-like, default=None The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 Parameter `labels` improved for multiclass problem. pos_label : str or int, default=1 The class to report if ``average='binary'`` and the data is binary. If the data are multiclass or multilabel, this will be ignored; setting ``labels=[pos_label]`` and ``average != 'binary'`` will report scores for that label only. average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \ default='binary' This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives (or summing their weights). Gives same result as 'weighted'. Except in multilabel classification, this and 'weighted' are both identical to the result of :func:`accuracy_score`. It can result in an F-score that is not between precision and recall. ``'macro'``: Calculate metrics for each label, and find their simple (unweighted) mean. This does not take label imbalance into account. This is identical to the result of :func:`balanced_accuracy_score` (with its default adjusted=False), except in multilabel classification which is not supported by :func:`balanced_accuracy_score`. ``'weighted'``: Calculate metrics for each label, and find their average weighted by support (the number or total weight of true instances for each label). This alters 'macro' to account for label imbalance. Gives same result as 'micro'. Except in multilabel classification, this and 'micro' are both identical to the result of :func:`accuracy_score`. It can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). sample_weight : array-like of shape (n_samples,), default=None Sample weights. 
zero_division : ""warn"", 0 or 1, default=""warn"" Sets the value to return when there is a zero division. If set to ""warn"", this acts as 0, but warnings are also raised. Returns ------- recall : float (if average is not None) or array of float of shape \ (n_unique_labels,) Recall of the positive class in binary classification or weighted average of the recall of each class for the multiclass task. See Also -------- precision_recall_fscore_support : Compute precision, recall, F-measure and support for each class. precision_score : Compute the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. balanced_accuracy_score : Compute balanced accuracy to deal with imbalanced datasets. multilabel_confusion_matrix : Compute a confusion matrix for each class or sample. PrecisionRecallDisplay.from_estimator : Plot precision-recall curve given an estimator and some data. PrecisionRecallDisplay.from_predictions : Plot precision-recall curve given binary class predictions. Notes ----- When ``true positive + false negative == 0``, recall returns 0 and raises ``UndefinedMetricWarning``. This behavior can be modified with ``zero_division``. Examples -------- >>> from sklearn.metrics import recall_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> recall_score(y_true, y_pred, average='macro') 0.33... >>> recall_score(y_true, y_pred, average='micro') 0.33... >>> recall_score(y_true, y_pred, average='weighted') 0.33... >>> recall_score(y_true, y_pred, average=None) array([1., 0., 0.]) >>> y_true = [0, 0, 0, 0, 0, 0] >>> recall_score(y_true, y_pred, average=None) array([0.5, 0. , 0. ]) >>> recall_score(y_true, y_pred, average=None, zero_division=1) array([0.5, 1. , 1. ]) >>> # multilabel classification >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]] >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]] >>> recall_score(y_true, y_pred, average=None) array([1. , 1. , 0.5]) """""" _, r, _, _ = precision_recall_fscore_support( y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=(""recall"",), sample_weight=sample_weight, zero_division=zero_division, ) return r ","def recall_score( y_true, y_pred, *, labels=None, pos_label=1, average=""binary"", sample_weight=None, zero_division=""warn"", ): """"""Compute the recall. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide `. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array-like, default=None The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 Parameter `labels` improved for multiclass problem. pos_label : str or int, default=1 The class to report if ``average='binary'`` and the data is binary. 
If the data are multiclass or multilabel, this will be ignored; setting ``labels=[pos_label]`` and ``average != 'binary'`` will report scores for that label only. average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \ default='binary' This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives (or summing their weights). Gives same result as 'weighted'. Except in multilabel classification, this and 'weighted' are both identical to the result of :func:`accuracy_score`. It can result in an F-score that is not between precision and recall. ``'macro'``: Calculate metrics for each label, and find their simple (unweighted) mean. This does not take label imbalance into account. This is identical to the result of :func:`balanced_accuracy_score` (with its default adjusted=False), except in multilabel classification which is not supported by :func:`balanced_accuracy_score`. ``'weighted'``: Calculate metrics for each label, and find their average weighted by support (the number or total weight of true instances for each label). This alters 'macro' to account for label imbalance. Gives same result as 'micro'. Except in multilabel classification, this and 'micro' are both identical to the result of :func:`accuracy_score`. It can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). sample_weight : array-like of shape (n_samples,), default=None Sample weights. zero_division : ""warn"", 0 or 1, default=""warn"" Sets the value to return when there is a zero division. If set to ""warn"", this acts as 0, but warnings are also raised. Returns ------- recall : float (if average is not None) or array of float of shape \ (n_unique_labels,) Recall of the positive class in binary classification or weighted average of the recall of each class for the multiclass task. See Also -------- precision_recall_fscore_support : Compute precision, recall, F-measure and support for each class. precision_score : Compute the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. balanced_accuracy_score : Compute balanced accuracy to deal with imbalanced datasets. multilabel_confusion_matrix : Compute a confusion matrix for each class or sample. PrecisionRecallDisplay.from_estimator : Plot precision-recall curve given an estimator and some data. PrecisionRecallDisplay.from_predictions : Plot precision-recall curve given binary class predictions. Notes ----- When ``true positive + false negative == 0``, recall returns 0 and raises ``UndefinedMetricWarning``. This behavior can be modified with ``zero_division``. Examples -------- >>> from sklearn.metrics import recall_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> recall_score(y_true, y_pred, average='macro') 0.33... >>> recall_score(y_true, y_pred, average='micro') 0.33... >>> recall_score(y_true, y_pred, average='weighted') 0.33... 
>>> recall_score(y_true, y_pred, average=None) array([1., 0., 0.]) >>> y_true = [0, 0, 0, 0, 0, 0] >>> recall_score(y_true, y_pred, average=None) array([0.5, 0. , 0. ]) >>> recall_score(y_true, y_pred, average=None, zero_division=1) array([0.5, 1. , 1. ]) >>> # multilabel classification >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]] >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]] >>> recall_score(y_true, y_pred, average=None) array([1. , 1. , 0.5]) """""" _, r, _, _ = precision_recall_fscore_support( y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=(""recall"",), sample_weight=sample_weight, zero_division=zero_division, ) return r " 2689,"def _cov(X, shrinkage=None, covariance_estimator=None): """"""Estimate covariance matrix (using optional covariance_estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. shrinkage : {'empirical', 'auto'} or float, default=None Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if `covariance_estimator` is not None. covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in :mod:`sklearn.covariance``. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Returns ------- s : ndarray of shape (n_features, n_features) Estimated covariance matrix. """""" if covariance_estimator is None: shrinkage = ""empirical"" if shrinkage is None else shrinkage if isinstance(shrinkage, str): if shrinkage == ""auto"": sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == ""empirical"": s = empirical_covariance(X) else: raise ValueError(""unknown shrinkage parameter"") elif isinstance(shrinkage, Number): if shrinkage < 0 or shrinkage > 1: raise ValueError(""shrinkage parameter must be between 0 and 1"") s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError(""shrinkage must be a float or a string"") else: if shrinkage is not None and shrinkage != 0: raise ValueError( ""covariance_estimator and shrinkage parameters "" ""are not None. Only one of the two can be set."" ) covariance_estimator.fit(X) if not hasattr(covariance_estimator, ""covariance_""): raise ValueError( ""%s does not have a covariance_ attribute"" % covariance_estimator.__class__.__name__ ) s = covariance_estimator.covariance_ return s ","def _cov(X, shrinkage=None, covariance_estimator=None): """"""Estimate covariance matrix (using optional covariance_estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. shrinkage : {'empirical', 'auto'} or float, default=None Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if `covariance_estimator` is not None. 
covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in :mod:`sklearn.covariance``. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Returns ------- s : ndarray of shape (n_features, n_features) Estimated covariance matrix. """""" if covariance_estimator is None: shrinkage = ""empirical"" if shrinkage is None else shrinkage if isinstance(shrinkage, str): if shrinkage == ""auto"": sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == ""empirical"": s = empirical_covariance(X) else: raise ValueError(""unknown shrinkage parameter"") elif isinstance(shrinkage, Real): if shrinkage < 0 or shrinkage > 1: raise ValueError(""shrinkage parameter must be between 0 and 1"") s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError(""shrinkage must be a float or a string"") else: if shrinkage is not None and shrinkage != 0: raise ValueError( ""covariance_estimator and shrinkage parameters "" ""are not None. Only one of the two can be set."" ) covariance_estimator.fit(X) if not hasattr(covariance_estimator, ""covariance_""): raise ValueError( ""%s does not have a covariance_ attribute"" % covariance_estimator.__class__.__name__ ) s = covariance_estimator.covariance_ return s " 1269,"def load(filename, **kwargs): r"""""" Load file given filename, guessing at file type Parameters ---------- filename : str or os.PathLike specification of file to load \*\*kwargs : keyword arguments Keyword arguments to format-specific load Returns ------- img : ``SpatialImage`` Image of guessed type """""" filename = _stringify_path(filename) # Check file exists and is not empty try: stat_result = os.stat(filename) except OSError: raise FileNotFoundError(f""No such file or no access: '{filename}'"") if stat_result.st_size <= 0: raise ImageFileError(f""Empty file: '{filename}'"") sniff = None for image_klass in all_image_classes: is_valid, sniff = image_klass.path_maybe_image(filename, sniff) if is_valid: img = image_klass.from_filename(filename, **kwargs) return img # reset sniff to None # as we don't want to sniff the contents of the decompressed container sniff = None matches, msg = _signature_matches_extension(filename, sniff) if not matches: raise ImageFileError(msg) raise ImageFileError(f'Cannot work out file type of ""{filename}""') ","def load(filename, **kwargs): r"""""" Load file given filename, guessing at file type Parameters ---------- filename : str or os.PathLike specification of file to load \*\*kwargs : keyword arguments Keyword arguments to format-specific load Returns ------- img : ``SpatialImage`` Image of guessed type """""" filename = _stringify_path(filename) # Check file exists and is not empty try: stat_result = os.stat(filename) except OSError: raise FileNotFoundError(f""No such file or no access: '{filename}'"") if stat_result.st_size <= 0: raise ImageFileError(f""Empty file: '{filename}'"") sniff = None for image_klass in all_image_classes: is_valid, sniff = image_klass.path_maybe_image(filename, sniff) if is_valid: img = image_klass.from_filename(filename, **kwargs) return img matches, msg = _signature_matches_extension(filename) if not matches: raise ImageFileError(msg) raise 
ImageFileError(f'Cannot work out file type of ""{filename}""') " 41830,"def _fast_non_dominated_sort( population: List[""multi_objective.trial.FrozenMultiObjectiveTrial""], directions: List[optuna.study.StudyDirection], ) -> List[List[""multi_objective.trial.FrozenMultiObjectiveTrial""]]: dominated_count = defaultdict(int) # type: DefaultDict[int, int] dominates_list = defaultdict(list) for p, q in itertools.combinations(population, 2): if p._dominates(q, directions): dominates_list[p.number].append(q.number) dominated_count[q.number] += 1 elif q._dominates(p, directions): dominates_list[q.number].append(p.number) dominated_count[p.number] += 1 population_per_rank = [] while len(population) > 0: non_dominated_population = [] i = 0 while i < len(population): if dominated_count[population[i].number] == 0: individual = population[i] if i == len(population) - 1: population.pop() else: population[i] = population.pop() non_dominated_population.append(individual) else: i += 1 for x in non_dominated_population: for y in dominates_list[x.number]: dominated_count[y] -= 1 assert non_dominated_population != [] population_per_rank.append(non_dominated_population) return population_per_rank ","def _fast_non_dominated_sort( population: List[""multi_objective.trial.FrozenMultiObjectiveTrial""], directions: List[optuna.study.StudyDirection], ) -> List[List[""multi_objective.trial.FrozenMultiObjectiveTrial""]]: dominated_count = defaultdict(int) # type: DefaultDict[int, int] dominates_list = defaultdict(list) for p, q in itertools.combinations(population, 2): if p._dominates(q, directions): dominates_list[p.number].append(q.number) dominated_count[q.number] += 1 elif q._dominates(p, directions): dominates_list[q.number].append(p.number) dominated_count[p.number] += 1 population_per_rank = [] while len(population) > 0: non_dominated_population = [] i = 0 while i < len(population): if dominated_count[population[i].number] == 0: individual = population[i] if i == len(population) - 1: population.pop() else: population[i] = population.pop() non_dominated_population.append(individual) else: i += 1 for x in non_dominated_population: for y in dominates_list[x.number]: dominated_count[y] -= 1 assert non_dominated_population population_per_rank.append(non_dominated_population) return population_per_rank " 4148,"def p_namedexpr_test(s): # defined in the LL parser as # namedexpr_test: test [':=' test] # The requirement that the LHS is a name is not enforced in the grammar. # For comparison the PEG parser does: # 1. look for ""name :="", if found it's definitely a named expression # so look for expression # 2. Otherwise, look for expression lhs = p_test_no_namedexpr_check(s) if s.sy == ':=': position = s.position() if not lhs.is_name: s.error(""Left-hand side of assignment expression must be an identifier"") s.next() rhs = p_test(s) return ExprNodes.AssignmentExpressionNode(position, lhs=lhs, rhs=rhs) return lhs ","def p_namedexpr_test(s): # defined in the LL parser as # namedexpr_test: test [':=' test] # The requirement that the LHS is a name is not enforced in the grammar. # For comparison the PEG parser does: # 1. look for ""name :="", if found it's definitely a named expression # so look for expression # 2. 
Otherwise, look for expression lhs = p_test_no_namedexpr_check(s) if s.sy == ':=': position = s.position() if not lhs.is_name: s.error(""Left-hand side of assignment expression must be an identifier"", fatal=False) s.next() rhs = p_test(s) return ExprNodes.AssignmentExpressionNode(position, lhs=lhs, rhs=rhs) return lhs " 48164,"def check_several_lists_elements_type(parameter_name_expected_type: list): """""" Function checks if parameters lists exist and raises ValueError exception if lists elements have unexpected type :param parameter_name_expected_type: list with tuples that contain parameter with nested elements, name for exception message and expected type """""" for parameter, name, expected_type in parameter_name_expected_type: if parameter: check_nested_elements_type( iterable=parameter, parameter_name=name, expected_type=expected_type ) ","def check_several_lists_elements_type(parameter_name_expected_type: list): """""" Function checks if parameters lists exist and raises ValueError exception if lists elements have unexpected type :param parameter_name_expected_type: list with tuples that contain parameter with nested elements, name for exception message and expected type """""" for parameter, name, expected_type in parameter_name_expected_type: if parameter is not None: check_nested_elements_type( iterable=parameter, parameter_name=name, expected_type=expected_type ) " 9362,"def test_wrap_var_list(): assert not isinstance(wrap_var(['foo']), AnsibleUnsafe) assert isinstance(wrap_var(['foo'])[0], AnsibleUnsafe) ","def test_wrap_var_list(): assert isinstance(wrap_var(['foo']), list) assert isinstance(wrap_var(['foo'])[0], AnsibleUnsafe) " 36740,"def magic_html_parser(html_text, partfiles): """"""Return safety sanitized html linked to partfiles. Rewrite the href=""cid:...."" attributes to point to the filenames in partfiles. Though not trivial, this should be possible using html.parser. """""" raise NotImplementedError(""Add the magic needed"") ","def magic_html_parser(html_text, partfiles): """"""Return safety-sanitized html linked to partfiles. Rewrite the href=""cid:...."" attributes to point to the filenames in partfiles. Though not trivial, this should be possible using html.parser. 
"""""" raise NotImplementedError(""Add the magic needed"") " 56782,"def _group_objects_by_db(objects): """""" :param objects: Deserialized object dictionaries :return: List of tuples of (db_alias, [object,...]) """""" objects_by_db = defaultdict(list) for obj in objects: app_label = obj['model'] model = apps.get_model(app_label) router_hints = {} if hasattr(model, 'partition_attr'): try: partition_value = obj['fields'][model.partition_attr] except KeyError: # in the case of foreign keys the serialized field name is the # name of the foreign key attribute try: field = [ field for field in model._meta.fields if field.column == model.partition_attr ][0] partition_value = obj['fields'][field.name] except KeyError: if model.partition_attr == model._meta.pk.attname: partition_value = obj['pk'] router_hints[HINT_PARTITION_VALUE] = partition_value db_alias = router.db_for_write(model, **router_hints) objects_by_db[db_alias].append(obj) return list(objects_by_db.items()) ","def _group_objects_by_db(objects): """""" :param objects: Deserialized object dictionaries :return: List of tuples of (db_alias, [object,...]) """""" objects_by_db = defaultdict(list) for obj in objects: app_label = obj['model'] model = apps.get_model(app_label) router_hints = {} if hasattr(model, 'partition_attr'): try: partition_value = obj['fields'][model.partition_attr] except KeyError: # in the case of foreign keys the serialized field name is the # name of the foreign key attribute field = [ field for field in model._meta.fields if field.column == model.partition_attr ][0] try: partition_value = obj['fields'][field.name] except KeyError: if model.partition_attr == model._meta.pk.attname: partition_value = obj['pk'] router_hints[HINT_PARTITION_VALUE] = partition_value db_alias = router.db_for_write(model, **router_hints) objects_by_db[db_alias].append(obj) return list(objects_by_db.items()) " 7861,"def test_fission_yield_distribution(): """"""Test an energy-dependent yield distribution"""""" yield_dict = { 0.0253: {""Xe135"": 7.85e-4, ""Gd155"": 4.08e-12, ""Sm149"": 1.71e-12}, 1.40e7: {""Xe135"": 4.54e-3, ""Gd155"": 5.83e-8, ""Sm149"": 2.69e-8}, 5.00e5: {""Xe135"": 1.12e-3, ""Gd155"": 1.32e-12}, # drop Sm149 } yield_dist = nuclide.FissionYieldDistribution(yield_dict) assert len(yield_dist) == len(yield_dict) assert yield_dist.energies == tuple(sorted(yield_dict.keys())) for exp_ene, exp_dist in yield_dict.items(): act_dist = yield_dict[exp_ene] for exp_prod, exp_yield in exp_dist.items(): assert act_dist[exp_prod] == exp_yield exp_yield = numpy.array([ [4.08e-12, 1.71e-12, 7.85e-4], [1.32e-12, 0.0, 1.12e-3], [5.83e-8, 2.69e-8, 4.54e-3]]) assert numpy.array_equal(yield_dist.yield_matrix, exp_yield) # Test the operations / special methods for fission yield orig_yields = yield_dist[0.0253] assert len(orig_yields) == len(yield_dict[0.0253]) for key, value in yield_dict[0.0253].items(): assert key in orig_yields assert orig_yields[key] == value # __getitem__ return yields as a view into yield matrix assert orig_yields.yields.base is yield_dist.yield_matrix # Scale and increment fission yields mod_yields = orig_yields * 2 assert numpy.array_equal(orig_yields.yields * 2, mod_yields.yields) mod_yields += orig_yields assert numpy.array_equal(orig_yields.yields * 3, mod_yields.yields) mod_yields = 2.0 * orig_yields assert numpy.array_equal(orig_yields.yields * 2, mod_yields.yields) mod_yields = numpy.float64(2.0) * orig_yields assert numpy.array_equal(orig_yields.yields * 2, mod_yields.yields) # Failure modes for adding, multiplying 
yields similar = numpy.empty_like(orig_yields.yields) with pytest.raises(TypeError): orig_yields + similar with pytest.raises(TypeError): similar + orig_yields with pytest.raises(TypeError): orig_yields += similar with pytest.raises(TypeError): orig_yields * similar with pytest.raises(TypeError): similar * orig_yields with pytest.raises(TypeError): orig_yields *= similar # Test restriction of fission products strict_restrict = yield_dist.restrict_products([""Xe135"", ""Sm149""]) with_extras = yield_dist.restrict_products( [""Xe135"", ""Sm149"", ""H1"", ""U235""]) assert strict_restrict.products == (""Sm149"", ""Xe135"", ) assert strict_restrict.energies == yield_dist.energies assert with_extras.products == (""Sm149"", ""Xe135"", ) assert with_extras.energies == yield_dist.energies for ene, new_yields in strict_restrict.items(): for product in strict_restrict.products: assert new_yields[product] == yield_dist[ene][product] assert with_extras[ene][product] == yield_dist[ene][product] assert yield_dist.restrict_products([""U235""]) is None ","def test_fission_yield_distribution(): """"""Test an energy-dependent yield distribution"""""" yield_dict = { 0.0253: {""Xe135"": 7.85e-4, ""Gd155"": 4.08e-12, ""Sm149"": 1.71e-12}, 1.40e7: {""Xe135"": 4.54e-3, ""Gd155"": 5.83e-8, ""Sm149"": 2.69e-8}, 5.00e5: {""Xe135"": 1.12e-3, ""Gd155"": 1.32e-12}, # drop Sm149 } yield_dist = nuclide.FissionYieldDistribution(yield_dict) assert len(yield_dist) == len(yield_dict) assert yield_dist.energies == tuple(sorted(yield_dict.keys())) for exp_ene, exp_dist in yield_dict.items(): act_dist = yield_dict[exp_ene] for exp_prod, exp_yield in exp_dist.items(): assert act_dist[exp_prod] == exp_yield exp_yield = numpy.array([ [4.08e-12, 1.71e-12, 7.85e-4], [1.32e-12, 0.0, 1.12e-3], [5.83e-8, 2.69e-8, 4.54e-3]]) assert numpy.array_equal(yield_dist.yield_matrix, exp_yield) # Test the operations / special methods for fission yield orig_yields = yield_dist[0.0253] assert len(orig_yields) == len(yield_dict[0.0253]) for key, value in yield_dict[0.0253].items(): assert key in orig_yields assert orig_yields[key] == value # __getitem__ return yields as a view into yield matrix assert orig_yields.yields.base is yield_dist.yield_matrix # Scale and increment fission yields mod_yields = orig_yields * 2 assert numpy.array_equal(orig_yields.yields * 2, mod_yields.yields) mod_yields += orig_yields assert numpy.array_equal(orig_yields.yields * 3, mod_yields.yields) mod_yields = 2.0 * orig_yields assert numpy.array_equal(orig_yields.yields * 2, mod_yields.yields) mod_yields = numpy.float64(2.0) * orig_yields assert numpy.array_equal(orig_yields.yields * 2, mod_yields.yields) # Failure modes for adding, multiplying yields similar = numpy.empty_like(orig_yields.yields) with pytest.raises(TypeError): orig_yields + similar with pytest.raises(TypeError): similar + orig_yields with pytest.raises(TypeError): orig_yields += similar with pytest.raises(TypeError): orig_yields * similar with pytest.raises(TypeError): similar * orig_yields with pytest.raises(TypeError): orig_yields *= similar # Test restriction of fission products strict_restrict = yield_dist.restrict_products([""Xe135"", ""Sm149""]) with_extras = yield_dist.restrict_products( [""Xe135"", ""Sm149"", ""H1"", ""U235""]) assert strict_restrict.products == (""Sm149"", ""Xe135"") assert strict_restrict.energies == yield_dist.energies assert with_extras.products == (""Sm149"", ""Xe135"", ) assert with_extras.energies == yield_dist.energies for ene, new_yields in strict_restrict.items(): for 
product in strict_restrict.products: assert new_yields[product] == yield_dist[ene][product] assert with_extras[ene][product] == yield_dist[ene][product] assert yield_dist.restrict_products([""U235""]) is None " 45427,"def export_config_help(filename: str) -> None: """""" Export all configs help messages to the CSV file. Parameters ---------- filename : str Name of the file to export configs data. """""" configs_data = [] for objname in sorted(globals()): obj = globals()[objname] if isinstance(obj, type) and issubclass(obj, Parameter) and not obj.is_abstract: data = { ""Config Name"": obj.__name__, ""Env. Variable Name"": getattr( obj, ""varname"", ""not backed by environment"" ), ""Default Value"": obj._get_default() if obj.__name__ != ""RayRedisPassword"" else ""random string"", # `Notes` `-` underlining can't be correctly parsed inside csv table by sphinx ""Description"": dedent(obj.__doc__).replace(""Notes\n-----"", ""Notes:\n"") if obj.__doc__ else """", ""Options"": obj.choices, } configs_data.append(data) pandas.DataFrame( configs_data, columns=[ ""Config Name"", ""Env. Variable Name"", ""Default Value"", ""Description"", ""Options"", ], ).to_csv(filename, index=False) ","def export_config_help(filename: str) -> None: """""" Export all configs help messages to the CSV file. Parameters ---------- filename : str Name of the file to export configs data. """""" configs_data = [] for objname in sorted(globals()): obj = globals()[objname] if isinstance(obj, type) and issubclass(obj, Parameter) and not obj.is_abstract: data = { ""Config Name"": obj.__name__, ""Env. Variable Name"": getattr( obj, ""varname"", ""not backed by environment"" ), ""Default Value"": obj._get_default() if obj.__name__ != ""RayRedisPassword"" else ""random string"", # `Notes` `-` underlining can't be correctly parsed inside csv table by sphinx ""Description"": dedent(obj.__doc__ or """").replace(""Notes\n-----"", ""Notes:\n""), ""Options"": obj.choices, } configs_data.append(data) pandas.DataFrame( configs_data, columns=[ ""Config Name"", ""Env. 
Variable Name"", ""Default Value"", ""Description"", ""Options"", ], ).to_csv(filename, index=False) " 44037,"def expected_shapes(n_wires): # compute the expected shapes for a given number of wires n_if = n_wires * (n_wires - 1) // 2 expected = [(n_if,)] * 2 + [(n_wires,)] return expected ","def expected_shapes(n_wires): # compute the expected shapes for a given number of wires n_if = n_wires * (n_wires - 1) / 2 expected = [(n_if,)] * 2 + [(n_wires,)] return expected " 8395,"def test_template_match_minimal_overlap(): """""" Test template_match when both observed and template spectra have minimal overlap on the wavelength axis """""" print(""minimal overlap test"") # Seed np.random so that results are consistent np.random.seed(42) # Create test spectra spec_axis = np.linspace(0, 50, 50) * u.AA spec_axis_no_overlap = np.linspace(45, 95, 50) * u.AA spec = Spectrum1D(spectral_axis=spec_axis, flux=np.random.randn(50) * u.Jy, uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy')) spec1 = Spectrum1D(spectral_axis=spec_axis_no_overlap, flux=np.random.randn(50) * u.Jy, uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy')) # Get result from template_match tm_result = template_comparison.template_match(spec, spec1) # Create new spectrum for comparison spec_result = Spectrum1D(spectral_axis=spec_axis, flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1)) # assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy) assert np.isnan(tm_result[1]) ","def test_template_match_minimal_overlap(): """""" Test template_match when both observed and template spectra have minimal overlap on the wavelength axis """""" print(""minimal overlap test"") # Seed np.random so that results are consistent np.random.seed(42) # Create test spectra spec_axis = np.linspace(0, 50, 50) * u.AA spec_axis_min_overlap = np.linspace(45, 95, 50) * u.AA spec = Spectrum1D(spectral_axis=spec_axis, flux=np.random.randn(50) * u.Jy, uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy')) spec1 = Spectrum1D(spectral_axis=spec_axis_no_overlap, flux=np.random.randn(50) * u.Jy, uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy')) # Get result from template_match tm_result = template_comparison.template_match(spec, spec1) # Create new spectrum for comparison spec_result = Spectrum1D(spectral_axis=spec_axis, flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1)) # assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy) assert np.isnan(tm_result[1]) " 7125,"def perimeter(image, neighbourhood=4): """"""Calculate total perimeter of all objects in binary image. Parameters ---------- image : (N, M) ndarray 2D binary image. neighbourhood : 4 or 8, optional Neighborhood connectivity for border pixel determination. It's used to compute the contour. A higher neighbourhood widens the border on which the perimeter is computed. Returns ------- perimeter : float Total perimeter of all objects in binary image. References ---------- .. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of a Perimeter Estimator. The Queen's University of Belfast. 
http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc Examples -------- >>> from skimage import data, util >>> from skimage.measure import label # coins image (binary) >>> img_coins = util.img_as_ubyte(data.coins()) > 110 # total perimeter of all objects in the image >>> perimeter(img_coins, neighbourhood=4) 7796.8679964360044 >>> perimeter(img_coins, neighbourhood=8) 8806.2680733252855 """""" if image.ndim > 2: raise NotImplementedError('perimeter does not support 3D images') if neighbourhood == 4: strel = STREL_4 else: strel = STREL_8 image = image.astype(np.uint8) eroded_image = ndi.binary_erosion(image, strel, border_value=0) border_image = image - eroded_image perimeter_weights = np.zeros(50, dtype=np.double) perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1 perimeter_weights[[21, 33]] = sqrt(2) perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2 perimeter_image = ndi.convolve(border_image, np.array([[10, 2, 10], [ 2, 1, 2], [10, 2, 10]]), mode='constant', cval=0) # You can also write # return perimeter_weights[perimeter_image].sum() # but that was measured as taking much longer than bincount + np.dot (5x # as much time) perimeter_histogram = np.bincount(perimeter_image.ravel(), minlength=50) total_perimeter = perimeter_histogram @ perimeter_weights return total_perimeter ","def perimeter(image, neighbourhood=4): """"""Calculate total perimeter of all objects in binary image. Parameters ---------- image : (N, M) ndarray 2D binary image. neighbourhood : 4 or 8, optional Neighborhood connectivity for border pixel determination. It's used to compute the contour. A higher neighbourhood widens the border on which the perimeter is computed. Returns ------- perimeter : float Total perimeter of all objects in binary image. References ---------- .. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of a Perimeter Estimator. The Queen's University of Belfast. http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc Examples -------- >>> from skimage import data, util >>> from skimage.measure import label # coins image (binary) >>> img_coins = util.img_as_ubyte(data.coins()) > 110 # total perimeter of all objects in the image >>> perimeter(img_coins, neighbourhood=4) 7796.8679964360044 >>> perimeter(img_coins, neighbourhood=8) 8806.2680733252855 """""" if image.ndim != 2: raise NotImplementedError('perimeter does not support 3D images') if neighbourhood == 4: strel = STREL_4 else: strel = STREL_8 image = image.astype(np.uint8) eroded_image = ndi.binary_erosion(image, strel, border_value=0) border_image = image - eroded_image perimeter_weights = np.zeros(50, dtype=np.double) perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1 perimeter_weights[[21, 33]] = sqrt(2) perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2 perimeter_image = ndi.convolve(border_image, np.array([[10, 2, 10], [ 2, 1, 2], [10, 2, 10]]), mode='constant', cval=0) # You can also write # return perimeter_weights[perimeter_image].sum() # but that was measured as taking much longer than bincount + np.dot (5x # as much time) perimeter_histogram = np.bincount(perimeter_image.ravel(), minlength=50) total_perimeter = perimeter_histogram @ perimeter_weights return total_perimeter " 28155,"def get_runid_from_guid(conn: SomeConnection, guid: str) -> Union[int, None]: """""" Get the run_id of a run based on the guid Args: conn: connection to the database guid: the guid to look up Returns: The run_id if found, else -1. 
Raises: RuntimeError if more than one run with the given GUID exists """""" query = """""" SELECT run_id FROM runs WHERE guid = ? """""" cursor = conn.cursor() cursor.execute(query, (guid,)) rows = cursor.fetchall() if len(rows) == 0: run_id = -1 elif len(rows) > 1: errormssg = ('Critical consistency error: multiple runs with' f' the same GUID found! {len(rows)} runs have GUID ' f'{guid}') log.critical(errormssg) raise RuntimeError(errormssg) else: run_id = int(rows[0]['run_id']) return run_id ","def get_runid_from_guid(conn: SomeConnection, guid: str) -> Union[int, None]: """""" Get the run_id of a run based on the guid Args: conn: connection to the database guid: the guid to look up Returns: The run_id if found, else -1. Raises: RuntimeError if more than one run with the given GUID exists """""" query = """""" SELECT run_id FROM runs WHERE guid = ? """""" cursor = conn.cursor() cursor.execute(query, (guid,)) rows = cursor.fetchall() if len(rows) == 0: run_id = -1 elif len(rows) > 1: errormssg = ('Critical consistency error: multiple runs with' f' the same GUID found! {len(rows)} runs have GUID ' f'{guid}') log.critical(errormssg) raise RuntimeError(errormssg) else: run_id = int(rows[0]['run_id']) return run_id " 31498,"def allocate_hosts_command(args): client = aws_session( region=args.get('region'), roleArn=args.get('roleArn'), roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration')) availability_zone = args.get('availability_zone') quantity = int(args.get('quantity')) kwargs = {} if args.get('auto_placement'): kwargs.update({'AutoPlacement': args.get('auto_placement')}) if args.get('client_token'): kwargs.update({'ClientToken': args.get('client_token')}) if args.get('instance_type'): kwargs.update({'InstanceType': args.get('instance_type')}) if args.get('instance_family'): kwargs.update({'InstanceFamily': args.get('instance_family')}) if args.get('host_recovery'): kwargs.update({'HostRecovery': args.get('host_recovery')}) response = client.allocate_hosts(AvailabilityZone=availability_zone, Quantity=quantity, **kwargs) data = ({ 'HostId': response['HostIds'] }) ec = {'AWS.EC2.Host': data} human_readable = tableToMarkdown('AWS EC2 Dedicated Host ID', data) return_outputs(human_readable, ec) ","def allocate_hosts_command(args): client = aws_session( region=args.get('region'), roleArn=args.get('roleArn'), roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration')) availability_zone = args.get('availability_zone') quantity = int(args.get('quantity')) kwargs = {} if args.get('auto_placement'): kwargs.update({'AutoPlacement': args.get('auto_placement')}) if args.get('client_token'): kwargs.update({'ClientToken': args.get('client_token')}) if args.get('instance_type'): kwargs.update({'InstanceType': args.get('instance_type')}) if args.get('instance_family'): kwargs.update({'InstanceFamily': args.get('instance_family')}) if args.get('host_recovery'): kwargs.update({'HostRecovery': args.get('host_recovery')}) response = client.allocate_hosts(AvailabilityZone=availability_zone, Quantity=quantity, **kwargs) data = ({ 'HostId': response.get('HostIds') }) ec = {'AWS.EC2.Host': data} human_readable = tableToMarkdown('AWS EC2 Dedicated Host ID', data) return_outputs(human_readable, ec) " 25653,"def base_conditional(Kmn: tf.Tensor, Kmm: tf.Tensor, Knn: tf.Tensor, function: tf.Tensor, *, full_cov=False, q_sqrt=None, white=False): """""" Given a g1 and g2, and distribution p and q such that p(g2) = N(g2; 0, Kmm) p(g1) = N(g1; 
0, Knn) p(g1 | g2) = N(g1; Knm(Kmm^-1)g2, Knn - Knm(Kmm^-1)Kmn) And q(g2) = N(g2; f, q_sqrt * q_sqrt^T) This method computes the mean and (co)variance of q(g1) = \int q(g2) p(g1|g2) :param Kmn: [M, ..., N] :param Kmm: [M, M] :param Knn: [..., N, N] or N :param f: [M, R] :param full_cov: bool :param q_sqrt: None or [R, M, M] (lower triangular) :param white: bool :return: [N, R] or [R, N, N] """""" # compute kernel stuff num_func = tf.shape(function)[-1] # R N = tf.shape(Kmn)[-1] M = tf.shape(function)[-2] # get the leadings dims in Kmn to the front of the tensor # if Kmn has rank two, i.e. [M, N], this is the identity op. K = tf.rank(Kmn) perm = tf.concat( [ tf.reshape(tf.range(1, K - 1), [K - 2]), # leading dims (...) tf.reshape(0, [1]), # [M] tf.reshape(K - 1, [1]) ], 0) # [N] Kmn = tf.transpose(Kmn, perm) # [..., M, N] leading_dims = Kmn.shape[:-2] Lm = tf.linalg.cholesky(Kmm) # [M, M] # Compute the projection matrix A Lm = tf.broadcast_to(Lm, tf.concat([leading_dims, Lm.shape], 0)) # [..., M, M] A = tf.linalg.triangular_solve(Lm, Kmn, lower=True) # [..., M, N] # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.linalg.matmul(A, A, transpose_a=True) # [..., N, N] cov_shape = tf.concat([leading_dims, [num_func, N, N]], 0) fvar = tf.broadcast_to(tf.expand_dims(fvar, -3), cov_shape) # [..., R, N, N] else: fvar = Knn - tf.reduce_sum(tf.square(A), -2) # [..., N] cov_shape = tf.concat([leading_dims, [num_func, N]], 0) # [..., R, N] fvar = tf.broadcast_to(tf.expand_dims(fvar, -2), cov_shape) # [..., R, N] # another backsubstitution in the unwhitened case if not white: A = tf.linalg.triangular_solve(tf.linalg.adjoint(Lm), A, lower=False) # construct the conditional mean f_shape = tf.concat([leading_dims, [M, num_func]], 0) # [..., M, R] f = tf.broadcast_to(function, f_shape) # [..., M, R] fmean = tf.linalg.matmul(A, f, transpose_a=True) # [..., N, R] if q_sqrt is not None: q_sqrt_dims = q_sqrt.shape.ndims if q_sqrt_dims == 2: LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # [R, M, N] elif q_sqrt_dims == 3: L = q_sqrt L_shape = tf.shape(L) L = tf.broadcast_to(L, tf.concat([leading_dims, L_shape], 0)) shape = [*leading_dims, num_func, M, N] A_tiled = tf.broadcast_to(tf.expand_dims(A, -3), shape) LTA = tf.linalg.matmul(L, A_tiled, transpose_a=True) # [R, M, N] else: # pragma: no cover raise ValueError(""Bad dimension for q_sqrt: %s"" % str(q_sqrt.shape.ndims)) if full_cov: fvar = fvar + tf.linalg.matmul(LTA, LTA, transpose_a=True) # [R, N, N] else: fvar = fvar + tf.reduce_sum(tf.square(LTA), -2) # [R, N] if not full_cov: fvar = tf.linalg.adjoint(fvar) # [N, R] return fmean, fvar # [N, R], [R, N, N] or [N, R] ","def base_conditional(Kmn: tf.Tensor, Kmm: tf.Tensor, Knn: tf.Tensor, function: tf.Tensor, *, full_cov=False, q_sqrt=None, white=False): """""" Given a g1 and g2, and distribution p and q such that p(g2) = N(g2; 0, Kmm) p(g1) = N(g1; 0, Knn) p(g1 | g2) = N(g1; Kₙₘ Kₘₘ⁻¹ g2, Kₙₙ - Kₙₘ Kₘₘ⁻¹ Kₘₙ) And q(g2) = N(g2; f, q_sqrt * q_sqrt^T) This method computes the mean and (co)variance of q(g1) = \int q(g2) p(g1|g2) :param Kmn: [M, ..., N] :param Kmm: [M, M] :param Knn: [..., N, N] or N :param f: [M, R] :param full_cov: bool :param q_sqrt: None or [R, M, M] (lower triangular) :param white: bool :return: [N, R] or [R, N, N] """""" # compute kernel stuff num_func = tf.shape(function)[-1] # R N = tf.shape(Kmn)[-1] M = tf.shape(function)[-2] # get the leadings dims in Kmn to the front of the tensor # if Kmn has rank two, i.e. [M, N], this is the identity op. 
K = tf.rank(Kmn) perm = tf.concat( [ tf.reshape(tf.range(1, K - 1), [K - 2]), # leading dims (...) tf.reshape(0, [1]), # [M] tf.reshape(K - 1, [1]) ], 0) # [N] Kmn = tf.transpose(Kmn, perm) # [..., M, N] leading_dims = Kmn.shape[:-2] Lm = tf.linalg.cholesky(Kmm) # [M, M] # Compute the projection matrix A Lm = tf.broadcast_to(Lm, tf.concat([leading_dims, Lm.shape], 0)) # [..., M, M] A = tf.linalg.triangular_solve(Lm, Kmn, lower=True) # [..., M, N] # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.linalg.matmul(A, A, transpose_a=True) # [..., N, N] cov_shape = tf.concat([leading_dims, [num_func, N, N]], 0) fvar = tf.broadcast_to(tf.expand_dims(fvar, -3), cov_shape) # [..., R, N, N] else: fvar = Knn - tf.reduce_sum(tf.square(A), -2) # [..., N] cov_shape = tf.concat([leading_dims, [num_func, N]], 0) # [..., R, N] fvar = tf.broadcast_to(tf.expand_dims(fvar, -2), cov_shape) # [..., R, N] # another backsubstitution in the unwhitened case if not white: A = tf.linalg.triangular_solve(tf.linalg.adjoint(Lm), A, lower=False) # construct the conditional mean f_shape = tf.concat([leading_dims, [M, num_func]], 0) # [..., M, R] f = tf.broadcast_to(function, f_shape) # [..., M, R] fmean = tf.linalg.matmul(A, f, transpose_a=True) # [..., N, R] if q_sqrt is not None: q_sqrt_dims = q_sqrt.shape.ndims if q_sqrt_dims == 2: LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # [R, M, N] elif q_sqrt_dims == 3: L = q_sqrt L_shape = tf.shape(L) L = tf.broadcast_to(L, tf.concat([leading_dims, L_shape], 0)) shape = [*leading_dims, num_func, M, N] A_tiled = tf.broadcast_to(tf.expand_dims(A, -3), shape) LTA = tf.linalg.matmul(L, A_tiled, transpose_a=True) # [R, M, N] else: # pragma: no cover raise ValueError(""Bad dimension for q_sqrt: %s"" % str(q_sqrt.shape.ndims)) if full_cov: fvar = fvar + tf.linalg.matmul(LTA, LTA, transpose_a=True) # [R, N, N] else: fvar = fvar + tf.reduce_sum(tf.square(LTA), -2) # [R, N] if not full_cov: fvar = tf.linalg.adjoint(fvar) # [N, R] return fmean, fvar # [N, R], [R, N, N] or [N, R] " 24412,"def assert_common_metrics(aggregator): tags = ['redis_host:{}'.format(common.HOST), 'redis_port:6382', 'redis_role:master'] aggregator.assert_service_check('redis.can_connect', status=Redis.OK, tags=tags) aggregator.assert_metric('redis.mem.fragmentation_ratio', count=2, tags=tags) aggregator.assert_metric('redis.rdb.bgsave', count=2, tags=tags) aggregator.assert_metric('redis.aof.last_rewrite_time', count=2, tags=tags) aggregator.assert_metric('redis.replication.master_repl_offset', count=2, tags=tags) aggregator.assert_metric('redis.net.rejected', count=2, tags=tags) aggregator.assert_metric('redis.cpu.sys_children', count=1, tags=tags) aggregator.assert_metric('redis.aof.rewrite', count=2, tags=tags) aggregator.assert_metric('redis.mem.maxmemory', count=2, tags=tags) aggregator.assert_metric('redis.mem.lua', count=2, tags=tags) aggregator.assert_metric('redis.net.instantaneous_ops_per_sec', count=2, tags=tags) aggregator.assert_metric('redis.perf.latest_fork_usec', count=2, tags=tags) aggregator.assert_metric('redis.keys.evicted', count=2, tags=tags) aggregator.assert_metric('redis.net.slaves', count=2, tags=tags) aggregator.assert_metric('redis.clients.blocked', count=2, tags=tags) aggregator.assert_metric('redis.stats.keyspace_misses', count=1, tags=tags) aggregator.assert_metric('redis.pubsub.channels', count=2, tags=tags) aggregator.assert_metric('redis.net.clients', count=2, tags=tags) aggregator.assert_metric('redis.mem.used', count=2, tags=tags) 
aggregator.assert_metric('redis.mem.peak', count=2, tags=tags) aggregator.assert_metric('redis.stats.keyspace_hits', count=1, tags=tags) aggregator.assert_metric('redis.net.commands', count=1, tags=tags) aggregator.assert_metric('redis.replication.backlog_histlen', count=2, tags=tags) aggregator.assert_metric('redis.mem.rss', count=2, tags=tags) aggregator.assert_metric('redis.cpu.sys', count=1, tags=tags) aggregator.assert_metric('redis.pubsub.patterns', count=2, tags=tags) aggregator.assert_metric('redis.keys.expired', count=2, tags=tags) aggregator.assert_metric('redis.info.latency_ms', count=2, tags=tags) aggregator.assert_metric('redis.cpu.user', count=1, tags=tags) aggregator.assert_metric('redis.cpu.user_children', count=1, tags=tags) aggregator.assert_metric('redis.rdb.last_bgsave_time', count=2, tags=tags) aggregator.assert_metric('redis.rdb.changes_since_last', count=2, tags=tags) tags += ['redis_db:db14'] aggregator.assert_metric('redis.expires', count=2, tags=tags) aggregator.assert_metric('redis.expires.percent', count=2, tags=tags) aggregator.assert_metric('redis.persist', count=2, tags=tags) aggregator.assert_metric('redis.persist.percent', count=2, tags=tags) aggregator.assert_metric('redis.keys', count=2, tags=tags) aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key1', 'key_type:list'] + tags)) aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key2', 'key_type:list'] + tags)) aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key3', 'key_type:list'] + tags)) aggregator.assert_metric('redis.replication.delay', count=2) if not is_affirmative(common.CLOUD_ENV): assert_non_cloud_metrics(aggregator) ","def assert_common_metrics(aggregator): tags = ['redis_host:{}'.format(common.HOST), 'redis_port:6382', 'redis_role:master'] aggregator.assert_service_check('redis.can_connect', status=Redis.OK, tags=tags) aggregator.assert_metric('redis.mem.fragmentation_ratio', count=2, tags=tags) aggregator.assert_metric('redis.rdb.bgsave', count=2, tags=tags) aggregator.assert_metric('redis.aof.last_rewrite_time', count=2, tags=tags) aggregator.assert_metric('redis.replication.master_repl_offset', count=2, tags=tags) aggregator.assert_metric('redis.net.rejected', count=2, tags=tags) aggregator.assert_metric('redis.cpu.sys_children', count=1, tags=tags) aggregator.assert_metric('redis.aof.rewrite', count=2, tags=tags) aggregator.assert_metric('redis.mem.maxmemory', count=2, tags=tags) aggregator.assert_metric('redis.mem.lua', count=2, tags=tags) aggregator.assert_metric('redis.net.instantaneous_ops_per_sec', count=2, tags=tags) aggregator.assert_metric('redis.perf.latest_fork_usec', count=2, tags=tags) aggregator.assert_metric('redis.keys.evicted', count=2, tags=tags) aggregator.assert_metric('redis.net.slaves', count=2, tags=tags) aggregator.assert_metric('redis.clients.blocked', count=2, tags=tags) aggregator.assert_metric('redis.stats.keyspace_misses', count=1, tags=tags) aggregator.assert_metric('redis.pubsub.channels', count=2, tags=tags) aggregator.assert_metric('redis.net.clients', count=2, tags=tags) aggregator.assert_metric('redis.mem.used', count=2, tags=tags) aggregator.assert_metric('redis.mem.peak', count=2, tags=tags) aggregator.assert_metric('redis.stats.keyspace_hits', count=1, tags=tags) aggregator.assert_metric('redis.net.commands', count=1, tags=tags) aggregator.assert_metric('redis.replication.backlog_histlen', count=2, tags=tags) aggregator.assert_metric('redis.mem.rss', count=2, tags=tags) 
aggregator.assert_metric('redis.cpu.sys', count=1, tags=tags) aggregator.assert_metric('redis.pubsub.patterns', count=2, tags=tags) aggregator.assert_metric('redis.keys.expired', count=2, tags=tags) aggregator.assert_metric('redis.info.latency_ms', count=2, tags=tags) aggregator.assert_metric('redis.cpu.user', count=1, tags=tags) aggregator.assert_metric('redis.cpu.user_children', count=1, tags=tags) aggregator.assert_metric('redis.rdb.last_bgsave_time', count=2, tags=tags) aggregator.assert_metric('redis.rdb.changes_since_last', count=2, tags=tags) tags += ['redis_db:db14'] aggregator.assert_metric('redis.expires', count=2, tags=tags) aggregator.assert_metric('redis.expires.percent', count=2, tags=tags) aggregator.assert_metric('redis.persist', count=2, tags=tags) aggregator.assert_metric('redis.persist.percent', count=2, tags=tags) aggregator.assert_metric('redis.keys', count=2, tags=tags) aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key1', 'key_type:list'] + tags)) aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key2', 'key_type:list'] + tags)) aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key3', 'key_type:list'] + tags)) aggregator.assert_metric('redis.replication.delay', count=2) if not is_affirmative(common.CLOUD_ENV): assert_non_cloud_metrics(aggregator, tags) " 27766,"def test_tmpdir_always_is_realpath(pytester: Pytester): # the reason why tmpdir should be a realpath is that # when you cd to it and do ""os.getcwd()"" you will anyway # get the realpath. Using the symlinked path can thus # easily result in path-inequality # XXX if that proves to be a problem, consider using # os.environ[""PWD""] realtemp = pytester.tmpdir.mkdir(""myrealtemp"") linktemp = pytester.tmpdir.join(""symlinktemp"") attempt_symlink_to(linktemp, str(realtemp)) p = pytester.makepyfile( """""" def test_1(tmpdir): import os assert os.path.realpath(str(tmpdir)) == str(tmpdir) """""" ) result = pytester.runpytest(""-s"", p, ""--basetemp=%s/bt"" % linktemp) assert not result.ret ","def test_tmpdir_always_is_realpath(pytester: Pytester) -> None: # the reason why tmpdir should be a realpath is that # when you cd to it and do ""os.getcwd()"" you will anyway # get the realpath. Using the symlinked path can thus # easily result in path-inequality # XXX if that proves to be a problem, consider using # os.environ[""PWD""] realtemp = pytester.tmpdir.mkdir(""myrealtemp"") linktemp = pytester.tmpdir.join(""symlinktemp"") attempt_symlink_to(linktemp, str(realtemp)) p = pytester.makepyfile( """""" def test_1(tmpdir): import os assert os.path.realpath(str(tmpdir)) == str(tmpdir) """""" ) result = pytester.runpytest(""-s"", p, ""--basetemp=%s/bt"" % linktemp) assert not result.ret " 37040,"def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None): """"""A text-based job status checker Args: job (BaseJob): The job to check. interval (int): The interval at which to check. _interval_set (bool): Was interval time set by user? quiet (bool): If True, do not print status messages. to_file (file): If file print status messages to it, else to stdout. 
"""""" _outstream = to_file if to_file else sys.stdout status = job.status() msg = status.value prev_msg = msg msg_len = len(msg) if not quiet: print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream) while status.name not in ['DONE', 'CANCELLED', 'ERROR']: time.sleep(interval) status = job.status() msg = status.value if status.name == 'QUEUED': msg += ' (%s)' % job.queue_position() if not _interval_set: interval = max(job.queue_position(), 2) else: if not _interval_set: interval = 2 # Adjust length of message so there are no artifacts if len(msg) < msg_len: msg += ' ' * (msg_len - len(msg)) elif len(msg) > msg_len: msg_len = len(msg) if msg != prev_msg and not quiet: print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream) prev_msg = msg if not quiet: print('', file=_outstream) ","def _text_checker(job, interval, _interval_set=False, quiet=False, output=sys.stdout): """"""A text-based job status checker Args: job (BaseJob): The job to check. interval (int): The interval at which to check. _interval_set (bool): Was interval time set by user? quiet (bool): If True, do not print status messages. to_file (file): If file print status messages to it, else to stdout. """""" _outstream = to_file if to_file else sys.stdout status = job.status() msg = status.value prev_msg = msg msg_len = len(msg) if not quiet: print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream) while status.name not in ['DONE', 'CANCELLED', 'ERROR']: time.sleep(interval) status = job.status() msg = status.value if status.name == 'QUEUED': msg += ' (%s)' % job.queue_position() if not _interval_set: interval = max(job.queue_position(), 2) else: if not _interval_set: interval = 2 # Adjust length of message so there are no artifacts if len(msg) < msg_len: msg += ' ' * (msg_len - len(msg)) elif len(msg) > msg_len: msg_len = len(msg) if msg != prev_msg and not quiet: print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream) prev_msg = msg if not quiet: print('', file=_outstream) " 55393,"def _log_early_stop_params(early_stop_callback, client, run_id): """""" Logs Early Stop parameters into mlflow :param early_stop_callback: Early stopping callback dict """""" if hasattr(early_stop_callback, ""monitor""): client.log_params( run_id, {""monitor"": early_stop_callback.monitor}, ) if hasattr(early_stop_callback, ""mode""): client.log_params( run_id, {""mode"": early_stop_callback.mode}, ) if hasattr(early_stop_callback, ""patience""): client.log_params( run_id, {""patience"": early_stop_callback.patience}, ) if hasattr(early_stop_callback, ""min_delta""): client.log_params( run_id, {""min_delta"": early_stop_callback.min_delta}, ) if hasattr(early_stop_callback, ""stopped_epoch""): client.log_params( run_id, {""stopped_epoch"": early_stop_callback.stopped_epoch}, ) ","def _log_early_stop_params(early_stop_callback, client, run_id): """""" Logs Early Stop parameters into mlflow :param early_stop_callback: Early stopping callback dict """""" params = {} if hasattr(early_stop_callback, ""monitor""): params[""monitor""] = early_stop_callback.monitor if hasattr(early_stop_callback, ""mode""): params[""mode""] = early_stop_callback.mode client.log_params(run_id, params) if hasattr(early_stop_callback, ""patience""): client.log_params( run_id, {""patience"": early_stop_callback.patience}, ) if hasattr(early_stop_callback, ""min_delta""): client.log_params( run_id, {""min_delta"": early_stop_callback.min_delta}, ) if hasattr(early_stop_callback, ""stopped_epoch""): client.log_params( run_id, {""stopped_epoch"": 
early_stop_callback.stopped_epoch}, ) " 4971,"def julian2num(j): """""" Convert a Julian date (or sequence) to a Matplotlib date (or sequence). Parameters ---------- j : float or sequence of floats Julian date(s) (days relative to 4713 BC Jan 1, 12:00:00 Julian calendar or 4714 BC Nov 24, 12:00:00, proleptic Gregorian calendar) Returns ------- float or sequence of floats Matplotlib date(s) (days relative to `.get_epoch`) """""" ep = np.datetime64(get_epoch(), 'h').astype(float) / 24. ep0 = np.datetime64('0000-12-31T00:00:00', 'h').astype(float) / 24. # Julian offset defined above is relative to 0000-12-31, but we need # relative to our current epoch: dt = JULIAN_OFFSET - ep0 + ep return np.subtract(j, dt) # Handles both scalar & nonscalar j. ","def julian2num(j): """""" Convert a Julian date (or sequence) to a Matplotlib date (or sequence). Parameters ---------- j : float or sequence of floats Julian date(s) (days relative to 4713 BC Jan 1, 12:00:00 Julian calendar or 4714 BC Nov 24, 12:00:00, proleptic Gregorian calendar) Returns ------- float or sequence of floats Matplotlib date(s); i.e. days relative to `.get_epoch`. """""" ep = np.datetime64(get_epoch(), 'h').astype(float) / 24. ep0 = np.datetime64('0000-12-31T00:00:00', 'h').astype(float) / 24. # Julian offset defined above is relative to 0000-12-31, but we need # relative to our current epoch: dt = JULIAN_OFFSET - ep0 + ep return np.subtract(j, dt) # Handles both scalar & nonscalar j. " 36431,"def chown(path, user=None, group=None, dir_fd=None, follow_symlinks=True): """"""Change owner user and group of the given path. user and group can be the uid/gid or the user/group names, and in that case, they are converted to their respective uid/gid. If dir_fd is set, it should be an open file descriptor to a directory. If follow_symlinks is set to False and the last element of the path is a symbolic link, chown will modify the link itself and not the file being referenced by the link. """""" if user is None and group is None: raise ValueError(""user and/or group must be set"") _user = user _group = group # -1 means don't change it if user is None: _user = -1 # user can either be an int (the uid) or a string (the system username) elif isinstance(user, str): _user = _get_uid(user) if _user is None: raise LookupError(""no such user: {!r}"".format(user)) if group is None: _group = -1 elif not isinstance(group, int): _group = _get_gid(group) if _group is None: raise LookupError(""no such group: {!r}"".format(group)) os.chown(path, _user, _group, dir_fd=dir_fd, follow_symlinks=follow_symlinks) ","def chown(path, user=None, group=None, *, dir_fd=None, follow_symlinks=True): """"""Change owner user and group of the given path. user and group can be the uid/gid or the user/group names, and in that case, they are converted to their respective uid/gid. If dir_fd is set, it should be an open file descriptor to a directory. If follow_symlinks is set to False and the last element of the path is a symbolic link, chown will modify the link itself and not the file being referenced by the link. 
"""""" if user is None and group is None: raise ValueError(""user and/or group must be set"") _user = user _group = group # -1 means don't change it if user is None: _user = -1 # user can either be an int (the uid) or a string (the system username) elif isinstance(user, str): _user = _get_uid(user) if _user is None: raise LookupError(""no such user: {!r}"".format(user)) if group is None: _group = -1 elif not isinstance(group, int): _group = _get_gid(group) if _group is None: raise LookupError(""no such group: {!r}"".format(group)) os.chown(path, _user, _group, dir_fd=dir_fd, follow_symlinks=follow_symlinks) " 42560,"def test_all_action_types_in_db(database): """""" Test that all action_type in DB deserialize to a valid ActionType """""" # Query for all locations cursor = database.conn.cursor() action_types = cursor.execute(""SELECT type, seq from action_type"") # We deserialize, then serialize and compare the result for action_type, seq in action_types: deserialized_action_type = deserialize_action_type_from_db(action_type) assert deserialized_action_type.value == seq - 1 action_type_name = deserialize_action_type(str(deserialized_action_type)) assert action_type_name == deserialized_action_type ","def test_all_action_types_in_db(database): """""" Test that all action_type in DB deserialize to a valid ActionType """""" # Query for all action types cursor = database.conn.cursor() action_types = cursor.execute(""SELECT type, seq from action_type"") # We deserialize, then serialize and compare the result for action_type, seq in action_types: deserialized_action_type = deserialize_action_type_from_db(action_type) assert deserialized_action_type.value == seq - 1 action_type_name = deserialize_action_type(str(deserialized_action_type)) assert action_type_name == deserialized_action_type " 57708,"def main(): command = demisto.command() params = demisto.params() report_url = params.get('report_url') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) mapper_in = params.get('mapper_in', DEFAULT_MAPPER_IN) workday_username = params.get('credentials', {}).get('identifier') workday_password = params.get('credentials', {}).get('password') LOG(f'Command being called is {command}') client = Client( base_url=None, verify=verify_certificate, headers={ 'Accept': 'application/json', 'Content-Type': 'application/json' }, proxy=proxy, ok_codes=(200, 204), auth=requests.auth.HTTPBasicAuth(workday_username, workday_password), report_url=report_url ) try: if command == 'test-module': return_results(test_module(client, params, mapper_in)) if command == 'fetch-incidents': ''' Checks if there are events are stored in the integration context. If yes, it gets it from there. Else, it makes a call to Workday to get a new report Returns the first x events (x being the fetch limit) and stores the remaining in integration context ''' workday_context = demisto.getIntegrationContext() events = workday_context.get('events') last_run = demisto.getLastRun() run_first_command = False if last_run: if last_run.get(""sync_users""): run_first_command = True if not run_first_command: workday_first_run_command(client, mapper_in) if not events: # Get the events from Workday by making an API call. 
Last run is updated only when API call is made last_run, events = fetch_incidents( client=client, last_run=last_run, fetch_time=params.get('fetch_events_time_minutes'), mapper_in=mapper_in ) fetch_limit = int(params.get('max_fetch')) demisto.setLastRun(last_run) demisto.incidents(events[:fetch_limit]) # Set the remaining events back to integration context workday_context = {'events': events[fetch_limit:]} demisto.setIntegrationContext(workday_context) except Exception as e: return_error(f'Failed to execute {demisto.command()} command, Error: {e}. Traceback: {traceback.format_exc()}') ","def main(): command = demisto.command() params = demisto.params() report_url = params.get('report_url') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) mapper_in = params.get('mapper_in', DEFAULT_MAPPER_IN) workday_username = params.get('credentials', {}).get('identifier') workday_password = params.get('credentials', {}).get('password') LOG(f'Command being called is {command}') client = Client( base_url=None, verify=verify_certificate, headers={ 'Accept': 'application/json', 'Content-Type': 'application/json' }, proxy=proxy, ok_codes=(200, 204), auth=requests.auth.HTTPBasicAuth(workday_username, workday_password), report_url=report_url ) try: if command == 'test-module': return_results(test_module(client, params, mapper_in)) if command == 'fetch-incidents': ''' Checks if there are events are stored in the integration context. If yes, it gets it from there. Else, it makes a call to Workday to get a new report Returns the first x events (x being the fetch limit) and stores the remaining in integration context ''' workday_context = demisto.getIntegrationContext() events = workday_context.get('events') last_run = demisto.getLastRun() run_first_command = False if last_run: if last_run.get(""sync_users""): run_first_command = True if not run_first_command: workday_first_run_command(client, mapper_in) if not events: # Get the events from Workday by making an API call. Last run is updated only when API call is made last_run, events = fetch_incidents( client=client, last_run=last_run, fetch_time=params.get('fetch_events_time_minutes'), mapper_in=mapper_in, report_url=report_url ) fetch_limit = int(params.get('max_fetch')) demisto.setLastRun(last_run) demisto.incidents(events[:fetch_limit]) # Set the remaining events back to integration context workday_context = {'events': events[fetch_limit:]} demisto.setIntegrationContext(workday_context) except Exception as e: return_error(f'Failed to execute {demisto.command()} command, Error: {e}. Traceback: {traceback.format_exc()}') " 34908,"def sequence_mask(data, valid_length=None, mask_value=0, axis=0): """"""Sets all elements outside the expected length of the sequence to a constant value. This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] and returns an array of the same shape. `axis` means the axis of the length dimension and can only be 0 or 1. If `axis` is 0, the data must have shape [MAX_LENGTH, batch_size, ...]. Otherwise (axis=1), the data must have shape [batch_size, MAX_LENGTH, ...]. `valid_length` gives the length of each sequence. `valid_length` should be a 1D int array with positive ints and has dimension [batch_size,]. Parameters ---------- data : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. 
valid_length : tvm.Tensor or None 1-D with shape [batch_size,] mask_value : float, default 0 The masking value, default axis : int, default 0 axis of the length dimension, must be 0 or 1. Returns ------- output : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. """""" assert len(data.shape) >= 2,\ ""only support data.ndim >= 2, received data.shape = {}"".format(data.shape) assert axis == 0 or axis == 1, ""only support axis = 0, 1, received axis = {}"".format(axis) return cpp.sequence_mask(data, valid_length, mask_value, axis) ","def sequence_mask(data, valid_length=None, mask_value=0, axis=0): """"""Sets all elements outside the expected length of the sequence to a constant value. This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] and returns an array of the same shape. `axis` means the axis of the length dimension and can only be 0 or 1. If `axis` is 0, the data must have shape [MAX_LENGTH, batch_size, ...]. Otherwise (axis=1), the data must have shape [batch_size, MAX_LENGTH, ...]. `valid_length` gives the length of each sequence. `valid_length` should be a 1D int array with positive ints and has dimension [batch_size,]. Parameters ---------- data : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. valid_length : tvm.Tensor or None 1-D with shape [batch_size,] mask_value : float, default 0 The masking value, default 0 axis : int, default 0 axis of the length dimension, must be 0 or 1. Returns ------- output : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. """""" assert len(data.shape) >= 2,\ ""only support data.ndim >= 2, received data.shape = {}"".format(data.shape) assert axis == 0 or axis == 1, ""only support axis = 0, 1, received axis = {}"".format(axis) return cpp.sequence_mask(data, valid_length, mask_value, axis) " 57965,"def main(): # Create EDL instance parameters = {'uri': '/settings/integration', 'body': body} try: results = demisto.executeCommand('demisto-api-put', parameters) except Exception as e: return_error(e) output = { ""Name"": edl_instance_name, ""PortListName"": edl_port_list_name, ""Query"": edl_query, ""Port"": port } readable_output = tableToMarkdown(""Indicator Feed"", output) entry = CommandResults( outputs_key_field=""Name"", outputs_prefix=""EDL"", outputs=output, readable_output=readable_output, raw_response=results ) return_results(entry) ","def main(): # Create EDL instance parameters = {'uri': '/settings/integration', 'body': body} try: results = demisto.executeCommand('demisto-api-put', parameters) except Exception as e: return_error(e) output = { ""Name"": edl_instance_name, ""PortListName"": edl_port_list_name, ""Query"": edl_query, ""Port"": port } readable_output = tableToMarkdown(""Indicator Feed"", output) return_results(CommandResults( outputs_key_field=""Name"", outputs_prefix=""EDL"", outputs=output, readable_output=readable_output, raw_response=results )) " 42917,"def is_clique(graph: nx.Graph) -> bool: """"""Determines if the input graph is a clique. A clique of :math:`n` nodes has :math:`n*(n-1)/2` edges. Example usage: .. code-block:: >>> from strawberryfields.apps.graph import utils >>> import networkx as nx >>> graph = nx.complete_graph(10) >>> utils.is_clique(graph) True Args: graph (nx.Graph): The input graph. 
Returns: bool: ``True`` if input graph is a clique and ``False`` otherwise. """""" edges = graph.edges nodes = graph.order() return len(edges) == nodes * (nodes - 1) / 2 ","def is_clique(graph: nx.Graph) -> bool: """"""Determines if the input graph is a clique. A clique of :math:`n` nodes has :math:`n(n-1)/2` edges. Example usage: .. code-block:: >>> from strawberryfields.apps.graph import utils >>> import networkx as nx >>> graph = nx.complete_graph(10) >>> utils.is_clique(graph) True Args: graph (nx.Graph): The input graph. Returns: bool: ``True`` if input graph is a clique and ``False`` otherwise. """""" edges = graph.edges nodes = graph.order() return len(edges) == nodes * (nodes - 1) / 2 " 25717,"def check_shapes(*specs: ArgumentSpec): """""" Decorator that checks the shapes of tensor arguments. This is compatible with both TensorFlow and NumPy. The specs passed to this decorator are (name, spec) tuples, where: name is a specification of the target to check the shape of. It can be the name of an argument. Or, it can be the special value ""return"", in which case the return value of the function is checked. Furthermore you can use dotted syntax (`argument.member1.member2`) to check members of objects. You can also use list lookup syntax (`argument[7]`) to access single members of tuples or lists. spec is definition of the expected shape. spec a sequence of one of: A constant integer. The corresponding dimension must have exactly this size. A variable name. The corresponding dimension can have any size, but must be the same everywhere that variable name is used. A variable name followed by ellipsis. This matches any number of dimensions. If used this must the first item in the spec sequence. Speed and interactions with `tf.function`: If you want to wrap your function in both `tf.function` and `check_shapes` it is recommended you put the `tf.function` outermost so that the shape checks are inside `tf.function`. Shape checks are performed while tracing graphs, but *not* compiled into the actual graphs. This is considered a feature as that means that `check_shapes` doesn't impact the execution speed of compiled functions. However, it also means that tensor dimensions of dynamic size are not verified in compiled mode. Examine the `statistics` global to monitor whether many of your dimensions are dynamic. Example: @tf.function @check_shapes( (""features"", [""batch_shape..."", ""n_features""]), (""weights"", [""n_features""]), (""return"", [""batch_shape...""]), ) def linear_model(features: tf.Tensor, weights: tf.Tensor) -> tf.Tensor: ... """""" parsed_specs = _parse_specs(specs) # We create four groups of specs: # * Groups for checking before and after the function is called. # * Specs for printing in error message and specs for actually checking. pre_print_specs = [spec for spec in parsed_specs if not spec.argument_ref.is_result] post_print_specs = parsed_specs pre_check_specs = pre_print_specs post_check_specs = [spec for spec in parsed_specs if spec.argument_ref.is_result] def _check_shapes(func: _C) -> _C: signature = inspect.signature(func) @wraps(func) def wrapped(*args, **kwargs): try: bound_arguments = signature.bind(*args, **kwargs) except TypeError: # TypeError is raised if *args and **kwargs don't actually match the arguments of # `func`. In that case we just call `func` normally, which will also result in an # error, but an error with the error message the user is used to. 
func(*args, **kwargs) raise AssertionError( ""The above line should fail so this line should never be reached."" ) bound_arguments.apply_defaults() arg_map = bound_arguments.arguments context: Dict[str, Union[int, List[Optional[int]]]] = {} _assert_shapes(func, pre_print_specs, pre_check_specs, arg_map, context) result = func(*args, **kwargs) arg_map[_RESULT_TOKEN] = result _assert_shapes(func, post_print_specs, post_check_specs, arg_map, context) return result wrapped.__check_shapes__ = _check_shapes # type: ignore return cast(_C, wrapped) return _check_shapes ","def check_shapes(*specs: ArgumentSpec): """""" Decorator that checks the shapes of tensor arguments. This is compatible with both TensorFlow and NumPy. The specs passed to this decorator are (name, spec) tuples, where: name is a specification of the target to check the shape of. It can be the name of an argument. Or, it can be the special value ""return"", in which case the return value of the function is checked. Furthermore you can use dotted syntax (`argument.member1.member2`) to check members of objects. You can also use list lookup syntax (`argument[7]`) to access single members of tuples or lists. spec is a definition of the expected shape. spec is a sequence of one of: A constant integer. The corresponding dimension must have exactly this size. A variable name. The corresponding dimension can have any size, but must be the same everywhere that variable name is used. A variable name followed by ellipsis. This matches any number of dimensions. If used this must the first item in the spec sequence. Speed and interactions with `tf.function`: If you want to wrap your function in both `tf.function` and `check_shapes` it is recommended you put the `tf.function` outermost so that the shape checks are inside `tf.function`. Shape checks are performed while tracing graphs, but *not* compiled into the actual graphs. This is considered a feature as that means that `check_shapes` doesn't impact the execution speed of compiled functions. However, it also means that tensor dimensions of dynamic size are not verified in compiled mode. Examine the `statistics` global to monitor whether many of your dimensions are dynamic. Example: @tf.function @check_shapes( (""features"", [""batch_shape..."", ""n_features""]), (""weights"", [""n_features""]), (""return"", [""batch_shape...""]), ) def linear_model(features: tf.Tensor, weights: tf.Tensor) -> tf.Tensor: ... """""" parsed_specs = _parse_specs(specs) # We create four groups of specs: # * Groups for checking before and after the function is called. # * Specs for printing in error message and specs for actually checking. pre_print_specs = [spec for spec in parsed_specs if not spec.argument_ref.is_result] post_print_specs = parsed_specs pre_check_specs = pre_print_specs post_check_specs = [spec for spec in parsed_specs if spec.argument_ref.is_result] def _check_shapes(func: _C) -> _C: signature = inspect.signature(func) @wraps(func) def wrapped(*args, **kwargs): try: bound_arguments = signature.bind(*args, **kwargs) except TypeError: # TypeError is raised if *args and **kwargs don't actually match the arguments of # `func`. In that case we just call `func` normally, which will also result in an # error, but an error with the error message the user is used to. 
func(*args, **kwargs) raise AssertionError( ""The above line should fail so this line should never be reached."" ) bound_arguments.apply_defaults() arg_map = bound_arguments.arguments context: Dict[str, Union[int, List[Optional[int]]]] = {} _assert_shapes(func, pre_print_specs, pre_check_specs, arg_map, context) result = func(*args, **kwargs) arg_map[_RESULT_TOKEN] = result _assert_shapes(func, post_print_specs, post_check_specs, arg_map, context) return result wrapped.__check_shapes__ = _check_shapes # type: ignore return cast(_C, wrapped) return _check_shapes " 7946,"def wwinp_to_wws(path): """"""Creates WeightWindows classes from a wwinp file Parameters ---------- path : str or pathlib.Path object Path to the wwinp file Returns ------- list of openmc.WeightWindows """""" # create generator for getting the next parameter from the file wwinp = __wwinp_reader(path) # first parameter, if, of wwinp file is unused next(wwinp) # check time parameter, iv if int(float(next(wwinp))) > 1: raise ValueError('Time-dependent weight windows ' 'are not yet supported.') # number of particle types, ni n_particle_types = int(float(next(wwinp))) # read an indicator of the mesh type. # this will be 10 if a rectilinear mesh # and 16 for cylindrical or spherical meshes mesh_chars = int(float(next(wwinp))) if mesh_chars != 10: # TODO: read the first entry by default and display a warning raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # read the number of energy groups for each particle, ne n_egroups = [int(next(wwinp)) for _ in range(n_particle_types)] # order that supported particle types will appear in the file particle_types = ['neutron', 'photon'] # add particle to list if at least one energy group is present particles = [p for e, p in zip(n_egroups, particle_types) if e > 0] # truncate list of energy groups if needed n_egroups = [e for e in n_egroups if e > 0] if n_particle_types > 2: msg = ('More than two particle types are present. 
' 'Only neutron and photon weight windows will be read.') warnings.warn(msg) # read total number of fine mesh elements in each coarse # element (nfx, nfy, nfz) n_fine_x = int(float(next(wwinp))) n_fine_y = int(float(next(wwinp))) n_fine_z = int(float(next(wwinp))) header_mesh_dims = (n_fine_x, n_fine_y, n_fine_z) # read the mesh origin: x0, y0, z0 llc = tuple(float(next(wwinp)) for _ in range(3)) # read the number of coarse mesh elements (ncx, ncy, ncz) n_coarse_x = int(float(next(wwinp))) n_coarse_y = int(float(next(wwinp))) n_coarse_z = int(float(next(wwinp))) # skip the value defining the geometry type, nwg, we already know this # 1 - rectilinear mesh # 2 - cylindrical mesh # 3 - spherical mesh mesh_type = int(float(next(wwinp))) if mesh_type != 1: # TODO: support additional mesh types raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # internal function for parsing mesh coordinates def _read_mesh_coords(wwinp, n_coarse_bins): coords = [float(next(wwinp))] for _ in range(n_coarse_bins): # number of fine mesh elements in this coarse element, sx sx = int(float(next(wwinp))) # value of next coordinate, px px = float(next(wwinp)) # fine mesh ratio, qx (currently unused) qx = next(wwinp) # append the fine mesh coordinates for this coarse element coords += list(np.linspace(coords[-1], px, sx + 1))[1:] return np.asarray(coords) # read the coordinates for each dimension into a rectilinear mesh mesh = RectilinearMesh() mesh.x_grid = _read_mesh_coords(wwinp, n_coarse_x) mesh.y_grid = _read_mesh_coords(wwinp, n_coarse_y) mesh.z_grid = _read_mesh_coords(wwinp, n_coarse_z) dims = ('x', 'y', 'z') # check consistency of mesh coordinates mesh_llc = mesh_val = (mesh.x_grid[0], mesh.y_grid[0], mesh.z_grid[0]) for dim, header_val, mesh_val in zip(dims, llc, mesh_llc): if header_val != mesh_val: msg = ('The {} corner of the mesh ({}) does not match ' 'the value read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, header_val)) # check total number of mesh elements in each direction mesh_dims = mesh.dimension for dim, header_val, mesh_val in zip(dims, header_mesh_dims, mesh_dims): if header_val != mesh_val: msg = ('Total number of mesh elements read in the {} ' 'direction ({}) is inconsistent with the ' 'number read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, header_val)) # read energy bins and weight window values for each particle wws = [] for particle, ne in zip(particles, n_egroups): # read upper energy bounds # it is implied that zero is always the first bound in MCNP e_bounds = np.asarray([0.0] + [float(next(wwinp)) for _ in range(ne)]) # adjust energy from MeV to eV e_bounds *= 1E6 # create an array for weight window lower bounds ww_lb = np.zeros((*mesh.dimension, ne)) for ijk in mesh.indices: # MCNP ordering for weight windows matches that of OpenMC # ('xyz' with x changing fastest) idx = tuple([v - 1 for v in ijk] + [slice(None)]) ww_lb[idx] = [float(next(wwinp)) for _ in range(ne)] # create a WeightWindows object and add it to the output list ww = WeightWindows(id=None, mesh=mesh, lower_ww_bounds=ww_lb.flatten(), upper_bound_ratio=5.0, energy_bounds=e_bounds, particle_type=particle) wws.append(ww) return wws ","def wwinp_to_wws(path): """"""Creates WeightWindows classes from a wwinp file Parameters ---------- path : str or pathlib.Path object Path to the wwinp file Returns ------- list of openmc.WeightWindows """""" # create generator for getting the next parameter from the file wwinp = 
__wwinp_reader(path) # first parameter, 'if' (file type), of wwinp file is unused next(wwinp) # check time parameter, iv if int(float(next(wwinp))) > 1: raise ValueError('Time-dependent weight windows ' 'are not yet supported.') # number of particle types, ni n_particle_types = int(float(next(wwinp))) # read an indicator of the mesh type. # this will be 10 if a rectilinear mesh # and 16 for cylindrical or spherical meshes mesh_chars = int(float(next(wwinp))) if mesh_chars != 10: # TODO: read the first entry by default and display a warning raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # read the number of energy groups for each particle, ne n_egroups = [int(next(wwinp)) for _ in range(n_particle_types)] # order that supported particle types will appear in the file particle_types = ['neutron', 'photon'] # add particle to list if at least one energy group is present particles = [p for e, p in zip(n_egroups, particle_types) if e > 0] # truncate list of energy groups if needed n_egroups = [e for e in n_egroups if e > 0] if n_particle_types > 2: msg = ('More than two particle types are present. ' 'Only neutron and photon weight windows will be read.') warnings.warn(msg) # read total number of fine mesh elements in each coarse # element (nfx, nfy, nfz) n_fine_x = int(float(next(wwinp))) n_fine_y = int(float(next(wwinp))) n_fine_z = int(float(next(wwinp))) header_mesh_dims = (n_fine_x, n_fine_y, n_fine_z) # read the mesh origin: x0, y0, z0 llc = tuple(float(next(wwinp)) for _ in range(3)) # read the number of coarse mesh elements (ncx, ncy, ncz) n_coarse_x = int(float(next(wwinp))) n_coarse_y = int(float(next(wwinp))) n_coarse_z = int(float(next(wwinp))) # skip the value defining the geometry type, nwg, we already know this # 1 - rectilinear mesh # 2 - cylindrical mesh # 3 - spherical mesh mesh_type = int(float(next(wwinp))) if mesh_type != 1: # TODO: support additional mesh types raise NotImplementedError('Cylindrical and Spherical meshes ' 'are not currently supported') # internal function for parsing mesh coordinates def _read_mesh_coords(wwinp, n_coarse_bins): coords = [float(next(wwinp))] for _ in range(n_coarse_bins): # number of fine mesh elements in this coarse element, sx sx = int(float(next(wwinp))) # value of next coordinate, px px = float(next(wwinp)) # fine mesh ratio, qx (currently unused) qx = next(wwinp) # append the fine mesh coordinates for this coarse element coords += list(np.linspace(coords[-1], px, sx + 1))[1:] return np.asarray(coords) # read the coordinates for each dimension into a rectilinear mesh mesh = RectilinearMesh() mesh.x_grid = _read_mesh_coords(wwinp, n_coarse_x) mesh.y_grid = _read_mesh_coords(wwinp, n_coarse_y) mesh.z_grid = _read_mesh_coords(wwinp, n_coarse_z) dims = ('x', 'y', 'z') # check consistency of mesh coordinates mesh_llc = mesh_val = (mesh.x_grid[0], mesh.y_grid[0], mesh.z_grid[0]) for dim, header_val, mesh_val in zip(dims, llc, mesh_llc): if header_val != mesh_val: msg = ('The {} corner of the mesh ({}) does not match ' 'the value read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, header_val)) # check total number of mesh elements in each direction mesh_dims = mesh.dimension for dim, header_val, mesh_val in zip(dims, header_mesh_dims, mesh_dims): if header_val != mesh_val: msg = ('Total number of mesh elements read in the {} ' 'direction ({}) is inconsistent with the ' 'number read in block 1 of the wwinp file ({})') raise ValueError(msg.format(dim, mesh_val, 
header_val)) # read energy bins and weight window values for each particle wws = [] for particle, ne in zip(particles, n_egroups): # read upper energy bounds # it is implied that zero is always the first bound in MCNP e_bounds = np.asarray([0.0] + [float(next(wwinp)) for _ in range(ne)]) # adjust energy from MeV to eV e_bounds *= 1E6 # create an array for weight window lower bounds ww_lb = np.zeros((*mesh.dimension, ne)) for ijk in mesh.indices: # MCNP ordering for weight windows matches that of OpenMC # ('xyz' with x changing fastest) idx = tuple([v - 1 for v in ijk] + [slice(None)]) ww_lb[idx] = [float(next(wwinp)) for _ in range(ne)] # create a WeightWindows object and add it to the output list ww = WeightWindows(id=None, mesh=mesh, lower_ww_bounds=ww_lb.flatten(), upper_bound_ratio=5.0, energy_bounds=e_bounds, particle_type=particle) wws.append(ww) return wws " 35609,"def get_video_backend(): """""" Returns the currently active video backend used to decode videos. Returns: (str): Name of the video backend. one of {'pyav', 'video_reader'}. """""" return _video_backend ","def get_video_backend(): """""" Returns the currently active video backend used to decode videos. Returns: str: Name of the video backend. one of {'pyav', 'video_reader'}. """""" return _video_backend " 17495,"def polyval( coord: T_Xarray, coeffs: T_Xarray, degree_dim: Hashable = ""degree"" ) -> T_Xarray: """"""Evaluate a polynomial at specific values Parameters ---------- coord : DataArray or Dataset Values at which to evaluate the polynomial. coeffs : DataArray or Dataset Coefficients of the polynomial. degree_dim : Hashable, default: ""degree"" Name of the polynomial degree dimension in `coeffs`. Returns ------- DataArray or Dataset Evaluated polynomial. See Also -------- xarray.DataArray.polyfit numpy.polynomial.polynomial.polyval """""" from .dataarray import DataArray from .dataset import Dataset deg_coord = coeffs[degree_dim] deg_idx_sorted = np.argsort(deg_coord.values) max_deg = int(deg_coord[deg_idx_sorted[-1]]) def ensure_numeric(data: DataArray) -> DataArray: if data.dtype.kind in ""mM"": return DataArray( datetime_to_numeric( data, offset=np.datetime64(""1970-01-01""), datetime_unit=""ns"" ), dims=data.dims, coords=data.coords, attrs=data.attrs, ) return data if isinstance(coord, Dataset): coord = coord.map(ensure_numeric) else: coord = ensure_numeric(coord) # using Horner's method # https://en.wikipedia.org/wiki/Horner%27s_method res = coeffs.isel({degree_dim: int(deg_idx_sorted[-1])}, drop=True) + zeros_like( coord ) deg_idx = len(deg_coord) - 2 for deg in range(max_deg - 1, -1, -1): res *= coord if deg_idx >= 0 and deg == int(deg_coord[deg_idx_sorted[deg_idx]]): # this degrees coefficient is provided, if not assume 0 res += coeffs.isel({degree_dim: int(deg_idx_sorted[deg_idx])}, drop=True) deg_idx -= 1 return res ","def polyval( coord: T_Xarray, coeffs: T_Xarray, degree_dim: Hashable = ""degree"" ) -> T_Xarray: """"""Evaluate a polynomial at specific values Parameters ---------- coord : DataArray or Dataset Values at which to evaluate the polynomial. coeffs : DataArray or Dataset Coefficients of the polynomial. degree_dim : Hashable, default: ""degree"" Name of the polynomial degree dimension in `coeffs`. Returns ------- DataArray or Dataset Evaluated polynomial. 
See Also -------- xarray.DataArray.polyfit numpy.polynomial.polynomial.polyval """""" from .dataarray import DataArray from .dataset import Dataset deg_coord = coeffs[degree_dim] deg_idx_sorted = np.argsort(deg_coord.values) max_deg = deg_coord.max().values def ensure_numeric(data: DataArray) -> DataArray: if data.dtype.kind in ""mM"": return DataArray( datetime_to_numeric( data, offset=np.datetime64(""1970-01-01""), datetime_unit=""ns"" ), dims=data.dims, coords=data.coords, attrs=data.attrs, ) return data if isinstance(coord, Dataset): coord = coord.map(ensure_numeric) else: coord = ensure_numeric(coord) # using Horner's method # https://en.wikipedia.org/wiki/Horner%27s_method res = coeffs.isel({degree_dim: int(deg_idx_sorted[-1])}, drop=True) + zeros_like( coord ) deg_idx = len(deg_coord) - 2 for deg in range(max_deg - 1, -1, -1): res *= coord if deg_idx >= 0 and deg == int(deg_coord[deg_idx_sorted[deg_idx]]): # this degrees coefficient is provided, if not assume 0 res += coeffs.isel({degree_dim: int(deg_idx_sorted[deg_idx])}, drop=True) deg_idx -= 1 return res " 58283,"def create_file_name(song_name: str, song_artists: List[str]) -> str: # build file name of converted file artistStr = """" # ! we eliminate contributing artist names that are also in the song name, else we # ! would end up with things like 'Jetta, Mastubs - I'd love to change the world # ! (Mastubs REMIX).mp3' which is kinda an odd file name. for artist in song_artists: if artist.lower() not in song_name.lower(): artistStr += artist + "", "" # make sure that main artist is included in artistStr even if they # are in the song name, for example # Lil Baby - Never Recover (Lil Baby & Gunna, Drake).mp3 if song_artists[0].lower() not in artistStr.lower(): artistStr = song_artists[0] + "", "" + artistStr # ! the ...[:-2] is to avoid the last ', ' appended to artistStr convertedFileName = artistStr[:-2] + "" - "" + song_name # ! this is windows specific (disallowed chars) for disallowedChar in [""/"", ""?"", ""\\"", ""*"", ""|"", ""<"", "">""]: if disallowedChar in convertedFileName: convertedFileName = convertedFileName.replace(disallowedChar, """") # ! double quotes ("") and semi-colons (:) are also disallowed characters but we would # ! like to retain their equivalents, so they aren't removed in the prior loop convertedFileName = convertedFileName.replace('""', ""'"").replace("":"", ""-"") return convertedFileName ","def create_file_name(song_name: str, song_artists: List[str]) -> str: # build file name of converted file # the main artist is always included artistStr = song_artists[0].lower() # ! we eliminate contributing artist names that are also in the song name, else we # ! would end up with things like 'Jetta, Mastubs - I'd love to change the world # ! (Mastubs REMIX).mp3' which is kinda an odd file name. for artist in song_artists[1:]: if artist.lower() not in song_name.lower(): artistStr += "", "" + artist convertedFileName = artistStr + "" - "" + song_name # ! this is windows specific (disallowed chars) for disallowedChar in [""/"", ""?"", ""\\"", ""*"", ""|"", ""<"", "">""]: if disallowedChar in convertedFileName: convertedFileName = convertedFileName.replace(disallowedChar, """") # ! double quotes ("") and semi-colons (:) are also disallowed characters but we would # ! 
like to retain their equivalents, so they aren't removed in the prior loop convertedFileName = convertedFileName.replace('""', ""'"").replace("":"", ""-"") return convertedFileName " 12171,"def kill(restriction=None, connection=None, order_by=None): # pragma: no cover """""" view and kill database connections. :param restriction: restriction to be applied to processlist :param connection: a datajoint.Connection object. Default calls datajoint.conn() :param order_by: order by string clause for output ordering. defaults to 'id'. Restrictions are specified as strings and can involve any of the attributes of information_schema.processlist: ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO. Examples: dj.kill('HOST LIKE ""%compute%""') lists only connections from hosts containing ""compute"". dj.kill('TIME > 600') lists only connections in their current state for more than 10 minutes """""" if connection is None: connection = conn() query = 'SELECT * FROM information_schema.processlist WHERE id <> CONNECTION_ID()' + ( """" if restriction is None else ' AND (%s)' % restriction) + ( ' ORDER BY %s' % (order_by if order_by else 'id')) while True: print(' ID USER HOST STATE TIME INFO') print('+--+ +----------+ +-----------+ +-----------+ +-----+') cur = connection.query(query, as_dict=True) for process in cur: try: print('{ID:>4d} {USER:<12s} {HOST:<12s} {STATE:<12s} {TIME:>7d} {INFO}'.format(**process)) except TypeError: print(process) response = input('process to kill or ""q"" to quit > ') if response == 'q': break if response: try: pid = int(response) except ValueError: pass # ignore non-numeric input else: try: connection.query('kill %d' % pid) except pymysql.err.InternalError: print('Process not found') ","def kill(restriction=None, connection=None, order_by=None): # pragma: no cover """""" view and kill database connections. :param restriction: restriction to be applied to processlist :param connection: a datajoint.Connection object. Default calls datajoint.conn() :param order_by: order by string clause for output ordering. defaults to 'id'. Restrictions are specified as strings and can involve any of the attributes of information_schema.processlist: ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO. Examples: dj.kill('HOST LIKE ""%compute%""') lists only connections from hosts containing ""compute"". 
dj.kill('TIME > 600') lists only connections in their current state for more than 10 minutes """""" if connection is None: connection = conn() query = 'SELECT * FROM information_schema.processlist WHERE id <> CONNECTION_ID()' + ( """" if restriction is None else ' AND (%s)' % restriction) + ( ' ORDER BY %s' % (order_by or 'id')) while True: print(' ID USER HOST STATE TIME INFO') print('+--+ +----------+ +-----------+ +-----------+ +-----+') cur = connection.query(query, as_dict=True) for process in cur: try: print('{ID:>4d} {USER:<12s} {HOST:<12s} {STATE:<12s} {TIME:>7d} {INFO}'.format(**process)) except TypeError: print(process) response = input('process to kill or ""q"" to quit > ') if response == 'q': break if response: try: pid = int(response) except ValueError: pass # ignore non-numeric input else: try: connection.query('kill %d' % pid) except pymysql.err.InternalError: print('Process not found') " 51763,"def config_prefer_upstream(args): """"""Generate a packages config based on the configuration of all upstream installs."""""" scope = args.scope if scope is None: scope = spack.config.default_modify_scope('packages') specs = spack.store.db.query(installed=[InstallStatuses.INSTALLED]) pref_specs = [] for spec in specs: upstream = None try: upstream = spec.package.installed_upstream except spack.repo.UnknownNamespaceError as err: tty.die( ""Could not find package when checking spec {0} ({1}). "" ""This is usually due to your Spack instance not being "" ""configured to know about the upstream's repositories."" .format(spec.name, err.message) ) if (upstream and not args.local) or (not upstream and args.local): pref_specs.append(spec) conflicting_variants = set() pkgs = {} for spec in pref_specs: # Collect all the upstream compilers and versions for this package. pkg = pkgs.get(spec.name, { 'version': [], 'compiler': [], }) pkgs[spec.name] = pkg # We have no existing variant if this is our first added version. existing_variants = pkg.get('variants', None if not pkg['version'] else '') version = spec.version.string if version not in pkg['version']: pkg['version'].append(version) compiler = str(spec.compiler) if compiler not in pkg['compiler']: pkg['compiler'].append(compiler) # Get and list all the variants that differ from the default. variants = [] for var_name, variant in spec.variants.items(): if (var_name in ['patches'] or var_name not in spec.package.variants): continue if variant.value != spec.package.variants[var_name].default: variants.append(str(variant)) variants.sort() variants = ' '.join(variants) if spec.name not in conflicting_variants: # Only specify the variants if there's a single variant # set across all versions/compilers. if existing_variants is not None and existing_variants != variants: conflicting_variants.add(spec.name) del pkg['variants'] elif variants: pkg['variants'] = variants if conflicting_variants: tty.warn( ""The following packages have multiple conflicting upstream "" ""specs. You may have to specify, by "" ""concretized hash, which spec you want when building "" ""packages that depend on them:\n - {0}"" .format(""\n - "".join(sorted(conflicting_variants)))) # Simply write the config to the specified file. 
existing = spack.config.get('packages', scope=scope) new = spack.config.merge_yaml(existing, pkgs) spack.config.set('packages', new, scope) config_file = spack.config.config.get_config_filename(scope, section) tty.msg(""Updated config at {0}"".format(config_file)) ","def config_prefer_upstream(args): """"""Generate a packages config based on the configuration of all upstream installs."""""" scope = args.scope if scope is None: scope = spack.config.default_modify_scope('packages') all_specs = set(spack.store.db.query(installed=True)) local_specs = set(spack.store.db.query_local(installed=True)) pref_specs = local_specs if args.local else all_specs - local_spec conflicting_variants = set() pkgs = {} for spec in pref_specs: # Collect all the upstream compilers and versions for this package. pkg = pkgs.get(spec.name, { 'version': [], 'compiler': [], }) pkgs[spec.name] = pkg # We have no existing variant if this is our first added version. existing_variants = pkg.get('variants', None if not pkg['version'] else '') version = spec.version.string if version not in pkg['version']: pkg['version'].append(version) compiler = str(spec.compiler) if compiler not in pkg['compiler']: pkg['compiler'].append(compiler) # Get and list all the variants that differ from the default. variants = [] for var_name, variant in spec.variants.items(): if (var_name in ['patches'] or var_name not in spec.package.variants): continue if variant.value != spec.package.variants[var_name].default: variants.append(str(variant)) variants.sort() variants = ' '.join(variants) if spec.name not in conflicting_variants: # Only specify the variants if there's a single variant # set across all versions/compilers. if existing_variants is not None and existing_variants != variants: conflicting_variants.add(spec.name) del pkg['variants'] elif variants: pkg['variants'] = variants if conflicting_variants: tty.warn( ""The following packages have multiple conflicting upstream "" ""specs. You may have to specify, by "" ""concretized hash, which spec you want when building "" ""packages that depend on them:\n - {0}"" .format(""\n - "".join(sorted(conflicting_variants)))) # Simply write the config to the specified file. existing = spack.config.get('packages', scope=scope) new = spack.config.merge_yaml(existing, pkgs) spack.config.set('packages', new, scope) config_file = spack.config.config.get_config_filename(scope, section) tty.msg(""Updated config at {0}"".format(config_file)) " 24710,"def parameter_dict_from_yaml_file( parameter_file: str, use_wildcard: bool = False, target_nodes: Optional[List[str]] = None, namespace: str = '' ) -> Dict[str, ParameterMsg]: """""" Build a dict of parameters from a YAML file formatted as per ``ros2 param dump``. Will load all parameters if ``target_nodes`` is None :param parameter_file: Path to the YAML file to load parameters from. :param use_wildcard: Use wildcard matching for the target nodes. :param target_nodes: List of nodes in the YAML file to load parameters from. :param namespace: Namespace to prepend to all parameters. 
:return: A dict of Parameter objects keyed by the parameter names """""" with open(parameter_file, 'r') as f: param_file = yaml.safe_load(f) param_keys = [] param_dict = {} if use_wildcard and '/**' in param_file: param_keys.append('/**') if target_nodes: for n in target_nodes: if n not in param_file.keys(): raise RuntimeError(f'Param file does not contain parameters for {n},' f'only for nodes: {list(param_file.keys())} ') param_keys.append(n) else: # wildcard key must go to the front of param_keys so that # node-namespaced parameters will override the wildcard parameters keys = set(param_file.keys()) keys.discard('/**') param_keys.extend(keys) if len(param_keys) == 0: raise RuntimeError('Param file does not contain selected parameters') for n in param_keys: value = param_file[n] if type(value) != dict or 'ros__parameters' not in value: raise RuntimeError('Invalid structure of parameter file for node {}' 'expected same format as provided by ros2 param dump' .format(n)) param_dict.update(value['ros__parameters']) return _unpack_parameter_dict(namespace, param_dict) ","def parameter_dict_from_yaml_file( parameter_file: str, use_wildcard: bool = False, target_nodes: Optional[List[str]] = None, namespace: str = '' ) -> Dict[str, ParameterMsg]: """""" Build a dict of parameters from a YAML file. Will load all parameters if ``target_nodes`` is None :param parameter_file: Path to the YAML file to load parameters from. :param use_wildcard: Use wildcard matching for the target nodes. :param target_nodes: List of nodes in the YAML file to load parameters from. :param namespace: Namespace to prepend to all parameters. :return: A dict of Parameter objects keyed by the parameter names """""" with open(parameter_file, 'r') as f: param_file = yaml.safe_load(f) param_keys = [] param_dict = {} if use_wildcard and '/**' in param_file: param_keys.append('/**') if target_nodes: for n in target_nodes: if n not in param_file.keys(): raise RuntimeError(f'Param file does not contain parameters for {n},' f'only for nodes: {list(param_file.keys())} ') param_keys.append(n) else: # wildcard key must go to the front of param_keys so that # node-namespaced parameters will override the wildcard parameters keys = set(param_file.keys()) keys.discard('/**') param_keys.extend(keys) if len(param_keys) == 0: raise RuntimeError('Param file does not contain selected parameters') for n in param_keys: value = param_file[n] if type(value) != dict or 'ros__parameters' not in value: raise RuntimeError('Invalid structure of parameter file for node {}' 'expected same format as provided by ros2 param dump' .format(n)) param_dict.update(value['ros__parameters']) return _unpack_parameter_dict(namespace, param_dict) " 12999,"def _create_line_for_order( manager: ""PluginsManager"", checkout: ""Checkout"", checkout_line_info: ""CheckoutLineInfo"", discounts: Iterable[DiscountInfo], channel: ""Channel"", products_translation: Dict[int, Optional[str]], variants_translation: Dict[int, Optional[str]], ) -> OrderLine: """"""Create a line for the given order. :raises InsufficientStock: when there is not enough items in stock for this variant. 
"""""" checkout_line = checkout_line_info.line quantity = checkout_line.quantity variant = checkout_line_info.variant channel_listing = checkout_line_info.channel_listing product = checkout_line_info.product collections = checkout_line_info.collections address = ( checkout.shipping_address or checkout.billing_address ) # FIXME: check which address we need here product_name = str(product) variant_name = str(variant) translated_product_name = products_translation.get(product.id, """") translated_variant_name = variants_translation.get(variant.id, """") if translated_product_name == product_name: translated_product_name = """" if translated_variant_name == variant_name: translated_variant_name = """" total_line_price = manager.calculate_checkout_line_total( checkout, checkout_line, variant, product, collections, address, channel, channel_listing, discounts, ) unit_price = manager.calculate_checkout_line_unit_price(total_line_price, quantity) tax_rate = manager.get_checkout_tax_rate( checkout, product, address, checkout_line, discounts, unit_price ) line = OrderLine( product_name=product_name, variant_name=variant_name, translated_product_name=translated_product_name, translated_variant_name=translated_variant_name, product_sku=variant.sku, is_shipping_required=variant.is_shipping_required(), quantity=quantity, variant=variant, unit_price=unit_price, # type: ignore total_price=total_line_price, tax_rate=tax_rate, ) return line ","def _create_line_for_order( manager: ""PluginsManager"", checkout: ""Checkout"", checkout_line_info: ""CheckoutLineInfo"", discounts: Iterable[DiscountInfo], channel: ""Channel"", products_translation: Dict[int, Optional[str]], variants_translation: Dict[int, Optional[str]], ) -> OrderLine: """"""Create a line for the given order. :raises InsufficientStock: when there is not enough items in stock for this variant. 
"""""" checkout_line = checkout_line_info.line quantity = checkout_line.quantity variant = checkout_line_info.variant channel_listing = checkout_line_info.channel_listing product = checkout_line_info.product collections = checkout_line_info.collections address = ( checkout.shipping_address or checkout.billing_address ) # FIXME: check which address we need here product_name = str(product) variant_name = str(variant) translated_product_name = products_translation.get(product.id, """") translated_variant_name = variants_translation.get(variant.id, """") if translated_product_name == product_name: translated_product_name = """" if translated_variant_name == variant_name: translated_variant_name = """" total_line_price = manager.calculate_checkout_line_total( checkout, checkout_line, variant, product, collections, address, channel, channel_listing, discounts, ) unit_price = manager.calculate_checkout_line_unit_price(total_line_price, quantity) tax_rate = manager.get_checkout_line_tax_rate( checkout, product, address, checkout_line, discounts, unit_price ) line = OrderLine( product_name=product_name, variant_name=variant_name, translated_product_name=translated_product_name, translated_variant_name=translated_variant_name, product_sku=variant.sku, is_shipping_required=variant.is_shipping_required(), quantity=quantity, variant=variant, unit_price=unit_price, # type: ignore total_price=total_line_price, tax_rate=tax_rate, ) return line " 32484,"def modify_user(user_id, username, realname, status, notes, email, firstname, lastname, alias1, alias2, alias3, alias4, aliases): admin_api.update_user(user_id, username, realname, status, notes, email, firstname, lastname, alias1, alias2, alias3, alias4, aliases) demisto.results('Status for' + user_id + ' Successful updated to ' + status) ","def modify_user(user_id, username, realname, status, notes, email, firstname, lastname, alias1, alias2, alias3, alias4, aliases): admin_api.update_user(user_id, username, realname, status, notes, email, firstname, lastname, alias1, alias2, alias3, alias4, aliases) demisto.results('Status for ' + user_id + ' Successful updated to ' + status) " 53184,"def construct_types(types): event_levels = set() event_keywords = set() for event_type in types: if not isinstance(event_type, str): raise ConfigurationError('Values for event filter `type` must be strings.') event_type = event_type.lower() if event_type in EVENT_TYPES_TO_LEVEL: event_levels.add(EVENT_TYPES_TO_LEVEL[event_type]) elif event_type in EVENT_TYPES_TO_KEYWORD: event_keywords.add(EVENT_TYPES_TO_KEYWORD[event_type]) else: raise ConfigurationError('Unknown value for event filter `type`: {}'.format(event_type)) parts = ['Level={}'.format(value_to_xpath_string(value)) for value in sorted(event_levels)] + [ 'Keywords={}'.format(value_to_xpath_string(value)) for value in sorted(event_keywords) ] return combine_value_parts(parts) ","def construct_types(types): event_levels = set() event_keywords = set() for event_type in types: if not isinstance(event_type, str): raise ConfigurationError('Values for event filter `type` must be strings.') event_type = event_type.lower() if event_type in EVENT_TYPES_TO_LEVEL: event_levels.add(EVENT_TYPES_TO_LEVEL[event_type]) elif event_type in EVENT_TYPES_TO_KEYWORD: event_keywords.add(EVENT_TYPES_TO_KEYWORD[event_type]) else: raise ConfigurationError('Unknown value for event filter `type`: {}'.format(event_type)) parts = ['Level={}'.format(value_to_xpath_string(value)) for value in sorted(event_levels)] 
parts.extend('Keywords={}'.format(value_to_xpath_string(value)) for value in sorted(event_keywords)) return combine_value_parts(parts) " 49875,"def dc_ohmic_losses(ohms, current): """""" Returns ohmic losses in in units of power from the equivalent resistance of of the wires and the operating current. Parameters ---------- ohms: numeric, float current: numeric, float or array-like Returns ---------- numeric Single or array-like value of the losses in units of power References ---------- -- [1] PVsyst 7 Help. ""Array ohmic wiring loss"". https://www.pvsyst.com/help/ohmic_loss.htm """""" return ohms * current * current ","def dc_ohmic_losses(ohms, current): """""" Returns ohmic losses in in units of power from the equivalent resistance of of the wires and the operating current. Parameters ---------- ohms: numeric Equivalent resistance of wires [ohm] current: numeric, float or array-like Operating current [A] Returns ---------- numeric Single or array-like value of the losses in units of power References ---------- -- [1] PVsyst 7 Help. ""Array ohmic wiring loss"". https://www.pvsyst.com/help/ohmic_loss.htm """""" return ohms * current * current " 46520,"def get_random_basic_value(rng: Random, typ: str) -> Any: if typ == 'bool': return rng.choice((True, False)) if typ[:4] == 'uint': size = int(typ[4:]) assert size in (8, 16, 32, 64, 128, 256) return rng.randint(0, 2**size - 1) if typ == 'byte': return rng.randint(0, 8) else: raise ValueError(""Not a basic type"") ","def get_random_basic_value(rng: Random, typ: str) -> Any: if typ == 'bool': return rng.choice((True, False)) if typ[:4] == 'uint': size = int(typ[4:]) assert size in UINT_SIZES return rng.randint(0, 2**size - 1) if typ == 'byte': return rng.randint(0, 8) else: raise ValueError(""Not a basic type"") " 30908,"def poll_field(args: Dict[str, Any]) -> Tuple[str, dict, dict]: field = args.get('field') regex = args.get('regex', None) regex = re.compile(regex) if regex else None incident = demisto.incidents()[0] data = { 'field': field, 'exists': False } if field in incident: data['exists'] = check_field(incident.get(field), regex) else: custom_fields = incident.get('CustomFields', {}) if field in custom_fields: data['exists'] = check_field(custom_fields.get(field), regex) context = { 'PollingCheckField(val.field == obj.field)': data } human_readable = ""The field exists"" if data['exists'] else ""The field does not exist"" return human_readable, context, data ","def poll_field(args: Dict[str, Any]) -> Tuple[str, dict, dict]: field = args.get('field') regex = args.get('regex', None) regex = re.compile(regex) if regex else None incident = demisto.incidents()[0] data = { 'field': field, 'exists': False } if field in incident: data['exists'] = check_field(incident.get(field), regex) else: custom_fields = incident.get('CustomFields', {}) if field in custom_fields: data['exists'] = check_field(custom_fields.get(field), regex) context = { 'PollingCheckField(val.field == obj.field)': data } human_readable = 'The field exists.' if data['exists'] else 'The field does not exist.' return human_readable, context, data " 36245,"def sam( adata: AnnData, max_iter: int = 10, num_norm_avg: int = 50, k: int = 20, distance: str = 'correlation', standardization: Optional[str] = 'Normalizer', weight_pcs: bool = True, npcs: Optional[int] = None, n_genes: Optional[int] = None, projection: Optional[str] = 'umap', inplace: bool = True, verbose: bool = True, ) -> Optional[AnnData]: """"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool. 
SAM iteratively rescales the input gene expression matrix to emphasize genes that are spatially variable along the intrinsic manifold of the data. It outputs the gene weights, nearest neighbor matrix, and a 2D projection. The AnnData input should contain unstandardized, non-negative values. Preferably, the data should be log-normalized and no genes should be filtered out. Parameters ---------- k - int, optional, default 20 The number of nearest neighbors to identify for each cell. distance : string, optional, default 'correlation' The distance metric to use when identifying nearest neighbors. Can be any of the distance metrics supported by sklearn's 'pdist'. max_iter - int, optional, default 10 The maximum number of iterations SAM will run. projection - str, optional, default 'umap' If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP embedding. Otherwise, no embedding will be generated. standardization - str, optional, default 'Normalizer' If 'Normalizer', use sklearn.preprocessing.Normalizer, which normalizes expression data prior to PCA such that each cell has unit L2 norm. If 'StandardScaler', use sklearn.preprocessing.StandardScaler, which normalizes expression data prior to PCA such that each gene has zero mean and unit variance. Otherwise, do not normalize the expression data. We recommend using 'StandardScaler' for large datasets with many expected cell types and 'Normalizer' otherwise. num_norm_avg - int, optional, default 50 The top 'num_norm_avg' dispersions are averaged to determine the normalization factor when calculating the weights. This prevents genes with large spatial dispersions from skewing the distribution of weights. weight_pcs - bool, optional, default True If True, scale the principal components by their eigenvalues. In datasets with many expected cell types, setting this to False might improve the resolution as these cell types might be encoded by low- variance principal components. npcs - int, optional, default None, Determines the number of top principal components selected at each iteration of the SAM algorithm. If None, this number is chosen automatically based on the size of the dataset. If weight_pcs is set to True, this parameter primarily affects the runtime of the SAM algorithm (more PCs = longer runtime). n_genes - int, optional, default None: Determines the number of top SAM-weighted genes to use at each iteration of the SAM algorithm. If None, this number is chosen automatically based on the size of the dataset. This parameter primarily affects the runtime of the SAM algorithm (more genes = longer runtime). inplace - bool, optional, default True: Set fields in `adata` if True. Otherwise, returns a copy. verbose - bool, optional, default True: If True, displays SAM log statements. Returns ------- sam - SAM The SAM object adata - AnnData `.var['weights']` SAM weights for each gene. `.var['spatial_dispersions']` Spatial dispersions for each gene (these are used to compute the SAM weights) `.var['mask_genes']` If preprocessed with SAM, this boolean vector indicates which genes were filtered out (=False). `.uns['preprocess_args']` Dictionary of parameters used for preprocessing. `.uns['run_args']` Dictionary of parameters used for running SAM. `.uns['pca_obj']` The sklearn.decomposition.PCA object. `.uns['X_processed']` The standardized and SAM-weighted data fed into PCA. `.uns['neighbors']` A dictionary with key 'connectivities' containing the kNN adjacency matrix output by SAM. 
If built-in scanpy dimensionality reduction methods are to be used using the SAM-output AnnData, users should recompute the neighbors using `.obs['X_pca']` with `scanpy.pp.neighbors`. `.uns['ranked_genes']` Gene IDs ranked in descending order by their SAM weights. `.obsm['X_pca']` The principal components output by SAM. `.obsm['X_umap']` The UMAP projection output by SAM. `.layers['X_disp']` The expression matrix used for nearest-neighbor averaging. `.layers['X_knn_avg']` The nearest-neighbor-averaged expression data used for computing the spatial dispersions of genes. Example ------- >>> import scanpy.external as sce >>> import scanpy as sc *** Running SAM *** Assuming we are given an AnnData object called `adata`, we can run the SAM algorithm as follows: >>> sam,adata = sce.tl.SAM(adata,inplace=True) The input AnnData object should contain unstandardized, non-negative expression values. Preferably, the data should be log-normalized and no genes should be filtered out. Please see the documentation for a description of all available parameters. For more detailed tutorials, please visit the original Github repository: https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial *** Plotting *** To visualize the output, we can use the built-in `scatter` function (this assumes that `matplotlib` is installed.) >>> sam.scatter(projection = 'X_umap') `scatter` accepts all keyword arguments used in the `matplotlib.pyplot.scatter` function. Please visit the plotting tutorials for more information: https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting *** SAMGUI *** SAM comes with the SAMGUI module, a graphical-user interface written with `Plotly` and `ipythonwidgets` for interactively exploring and annotating the scRNAseq data and running SAM. Dependencies can be installed with Anaconda by following the instructions in the self-assembling-manifold Github README: https://github.com/atarashansky/self-assembling-manifold In a Jupyter notebook, execute the following to launch the interface: >>> from SAMGUI import SAMGUI >>> sam_gui = SAMGUI(sam) # sam is your SAM object >>> sam_gui.SamPlot This can also be enabled in Jupyer Lab by following the instructions in the self-assembling-manifold README. """""" logg.info('Self-assembling manifold') try: from SAM import SAM except ImportError: raise ImportError( '\nplease install sam-algorithm: \n\n' '\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n' '\tcd self-assembling-manifold\n' '\tpip install .' ) s = SAM(counts=adata, inplace=inplace) logg.info('Running SAM') s.run( max_iter=max_iter, num_norm_avg=num_norm_avg, k=k, distance=distance, preprocessing=standardization, weight_PCs=weight_pcs, npcs=npcs, n_genes=n_genes, projection=projection, verbose=verbose, ) return (s, adata) if inplace else (s, s.adata) ","def sam( adata: AnnData, max_iter: int = 10, num_norm_avg: int = 50, k: int = 20, distance: str = 'correlation', standardization: Optional[str] = 'Normalizer', weight_pcs: bool = True, npcs: Optional[int] = None, n_genes: Optional[int] = None, projection: Optional[str] = 'umap', inplace: bool = True, verbose: bool = True, ) -> Optional[AnnData]: """"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool. SAM iteratively rescales the input gene expression matrix to emphasize genes that are spatially variable along the intrinsic manifold of the data. It outputs the gene weights, nearest neighbor matrix, and a 2D projection. 
The AnnData input should contain unstandardized, non-negative values. Preferably, the data should be log-normalized and no genes should be filtered out. Parameters ---------- k - int, optional, default 20 The number of nearest neighbors to identify for each cell. distance : string, optional, default 'correlation' The distance metric to use when identifying nearest neighbors. Can be any of the distance metrics supported by sklearn's 'pdist'. max_iter The maximum number of iterations SAM will run. projection - str, optional, default 'umap' If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP embedding. Otherwise, no embedding will be generated. standardization - str, optional, default 'Normalizer' If 'Normalizer', use sklearn.preprocessing.Normalizer, which normalizes expression data prior to PCA such that each cell has unit L2 norm. If 'StandardScaler', use sklearn.preprocessing.StandardScaler, which normalizes expression data prior to PCA such that each gene has zero mean and unit variance. Otherwise, do not normalize the expression data. We recommend using 'StandardScaler' for large datasets with many expected cell types and 'Normalizer' otherwise. num_norm_avg - int, optional, default 50 The top 'num_norm_avg' dispersions are averaged to determine the normalization factor when calculating the weights. This prevents genes with large spatial dispersions from skewing the distribution of weights. weight_pcs - bool, optional, default True If True, scale the principal components by their eigenvalues. In datasets with many expected cell types, setting this to False might improve the resolution as these cell types might be encoded by low- variance principal components. npcs - int, optional, default None, Determines the number of top principal components selected at each iteration of the SAM algorithm. If None, this number is chosen automatically based on the size of the dataset. If weight_pcs is set to True, this parameter primarily affects the runtime of the SAM algorithm (more PCs = longer runtime). n_genes - int, optional, default None: Determines the number of top SAM-weighted genes to use at each iteration of the SAM algorithm. If None, this number is chosen automatically based on the size of the dataset. This parameter primarily affects the runtime of the SAM algorithm (more genes = longer runtime). inplace - bool, optional, default True: Set fields in `adata` if True. Otherwise, returns a copy. verbose - bool, optional, default True: If True, displays SAM log statements. Returns ------- sam - SAM The SAM object adata - AnnData `.var['weights']` SAM weights for each gene. `.var['spatial_dispersions']` Spatial dispersions for each gene (these are used to compute the SAM weights) `.var['mask_genes']` If preprocessed with SAM, this boolean vector indicates which genes were filtered out (=False). `.uns['preprocess_args']` Dictionary of parameters used for preprocessing. `.uns['run_args']` Dictionary of parameters used for running SAM. `.uns['pca_obj']` The sklearn.decomposition.PCA object. `.uns['X_processed']` The standardized and SAM-weighted data fed into PCA. `.uns['neighbors']` A dictionary with key 'connectivities' containing the kNN adjacency matrix output by SAM. If built-in scanpy dimensionality reduction methods are to be used using the SAM-output AnnData, users should recompute the neighbors using `.obs['X_pca']` with `scanpy.pp.neighbors`. `.uns['ranked_genes']` Gene IDs ranked in descending order by their SAM weights. 
`.obsm['X_pca']` The principal components output by SAM. `.obsm['X_umap']` The UMAP projection output by SAM. `.layers['X_disp']` The expression matrix used for nearest-neighbor averaging. `.layers['X_knn_avg']` The nearest-neighbor-averaged expression data used for computing the spatial dispersions of genes. Example ------- >>> import scanpy.external as sce >>> import scanpy as sc *** Running SAM *** Assuming we are given an AnnData object called `adata`, we can run the SAM algorithm as follows: >>> sam,adata = sce.tl.SAM(adata,inplace=True) The input AnnData object should contain unstandardized, non-negative expression values. Preferably, the data should be log-normalized and no genes should be filtered out. Please see the documentation for a description of all available parameters. For more detailed tutorials, please visit the original Github repository: https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial *** Plotting *** To visualize the output, we can use the built-in `scatter` function (this assumes that `matplotlib` is installed.) >>> sam.scatter(projection = 'X_umap') `scatter` accepts all keyword arguments used in the `matplotlib.pyplot.scatter` function. Please visit the plotting tutorials for more information: https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting *** SAMGUI *** SAM comes with the SAMGUI module, a graphical-user interface written with `Plotly` and `ipythonwidgets` for interactively exploring and annotating the scRNAseq data and running SAM. Dependencies can be installed with Anaconda by following the instructions in the self-assembling-manifold Github README: https://github.com/atarashansky/self-assembling-manifold In a Jupyter notebook, execute the following to launch the interface: >>> from SAMGUI import SAMGUI >>> sam_gui = SAMGUI(sam) # sam is your SAM object >>> sam_gui.SamPlot This can also be enabled in Jupyer Lab by following the instructions in the self-assembling-manifold README. """""" logg.info('Self-assembling manifold') try: from SAM import SAM except ImportError: raise ImportError( '\nplease install sam-algorithm: \n\n' '\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n' '\tcd self-assembling-manifold\n' '\tpip install .' 
) s = SAM(counts=adata, inplace=inplace) logg.info('Running SAM') s.run( max_iter=max_iter, num_norm_avg=num_norm_avg, k=k, distance=distance, preprocessing=standardization, weight_PCs=weight_pcs, npcs=npcs, n_genes=n_genes, projection=projection, verbose=verbose, ) return (s, adata) if inplace else (s, s.adata) " 10530,"def get_sysctl(module, prefixes): sysctl_cmd = module.get_bin_path('sysctl') cmd = [sysctl_cmd] cmd.extend(prefixes) sysctl = dict() try: rc, out, err = module.run_command(cmd) except (IOError, OSError) as e: module.warn('Unable to read sysctl: %s' % to_text(e)) rc = 1 if rc == 0: key = '' value = '' for line in out.splitlines(): if not line.strip(): continue if line.startswith(' '): # handle multiline values, they will not have a starting key value +=line continue if key: sysctl[key] = value.strip() try: (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1) except Exception as e: module.warn('Unable to spit sysctl line (%s): %s' % (to_text(line), to_text(e))) else: if key: sysctl[key] = value.strip() return sysctl ","def get_sysctl(module, prefixes): sysctl_cmd = module.get_bin_path('sysctl') cmd = [sysctl_cmd] cmd.extend(prefixes) sysctl = dict() try: rc, out, err = module.run_command(cmd) except (IOError, OSError) as e: module.warn('Unable to read sysctl: %s' % to_text(e)) rc = 1 if rc == 0: key = '' value = '' for line in out.splitlines(): if not line.strip(): continue if line.startswith(' '): # handle multiline values, they will not have a starting key value +=line continue if key: sysctl[key] = value.strip() try: (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1) except Exception as e: module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e))) else: if key: sysctl[key] = value.strip() return sysctl " 672,"def show_image(obj): if not obj.image: return 'None' img_url = ""%s%s"" % (settings.MEDIA_URL, obj.image) return ('' % (img_url, img_url)) ","def show_image(obj): if not obj.image: return 'None' img_url = obj.image.url return ('' % (img_url, img_url)) " 11978,"def test_pcolormesh_partially_masked(): data = np.ma.masked_all((40, 30))[:-1, :-1] data[0:100] = 10 # Check that a partially masked data array does trigger a pcolor call. with mock.patch('cartopy.mpl.geoaxes.GeoAxes.pcolor') as pcolor: ax = plt.axes(projection=ccrs.PlateCarree()) ax.pcolormesh(np.linspace(0, 360, 30), np.linspace(-90, 90, 40), data) assert pcolor.call_count == 1, (""pcolor should have been called "" ""exactly once."") plt.close() ","def test_pcolormesh_partially_masked(): data = np.ma.masked_all((39, 29)) data[0:100] = 10 # Check that a partially masked data array does trigger a pcolor call. with mock.patch('cartopy.mpl.geoaxes.GeoAxes.pcolor') as pcolor: ax = plt.axes(projection=ccrs.PlateCarree()) ax.pcolormesh(np.linspace(0, 360, 30), np.linspace(-90, 90, 40), data) assert pcolor.call_count == 1, (""pcolor should have been called "" ""exactly once."") plt.close() " 31803,"def clean_user_query(query: str) -> Dict[str, Any]: """""" Takes the query string created by the user, adds necessary argument and removes unnecessary arguments Args: query: User's query string Returns: Dict which has only needed arguments to be sent to MISP """""" try: params = json.loads(query) params[""returnFormat""] = ""json"" params.pop(""timestamp"", None) except Exception as err: demisto.debug(str(err)) raise DemistoException(f'Could not parse user query. 
\n\nError massage: {err}') return params ","def clean_user_query(query: str) -> Dict[str, Any]: """""" Takes the query string created by the user, adds necessary argument and removes unnecessary arguments Args: query: User's query string Returns: Dict which has only needed arguments to be sent to MISP """""" try: params = json.loads(query) params[""returnFormat""] = ""json"" params.pop(""timestamp"", None) except Exception as err: demisto.debug(str(err)) raise DemistoException(f'Could not parse user query.\nError massage: {err}') return params " 42908,"def graph_embed(A, mean_photon=1.0, make_traceless=False, atol=1e-08): r""""""Embed a graph into a Gaussian state. Given a graph in terms of a symmetric adjacency matrix (in general with arbitrary complex off-diagonal and real diagonal entries), returns the squeezing parameters and interferometer necessary for creating the Gaussian state whose off-diagonal parts are proportional to that matrix. Uses :func:`takagi`. Args: A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph max_mean_photon (float): It guarantees that the mean photon number in the pure Gaussian state representing the graph satisfies :math:`\sum_i sinh(r_{i})^2 ==` ``mean_photon``. make_traceless (bool): Removes the trace of the input matrix, by performing the transformation :math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode the graph. rtol (float): relative tolerance used when checking if the input matrix is symmetric. atol (float): absolute tolerance used when checking if the input matrix is symmetric. Returns: tuple[array, array]: squeezing parameters of the input state to the interferometer, and the unitary matrix representing the interferometer """""" (m, n) = A.shape if m != n: raise ValueError(""The matrix is not square."") # if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol) # raise ValueError(""The matrix is not symmetric."") if make_traceless: A = A - np.trace(A) * np.identity(n) / n scale = find_scaling_adjacency_matrix(A, mean_photon) A = scale * A s, U = takagi(A, tol=atol) vals = -np.arctanh(s) return vals, U ","def graph_embed(A, mean_photon=1.0, make_traceless=False, atol=1e-08): r""""""Embed a graph into a Gaussian state. Given a graph in terms of a symmetric adjacency matrix (in general with arbitrary complex off-diagonal and real diagonal entries), returns the squeezing parameters and interferometer necessary for creating the Gaussian state whose off-diagonal parts are proportional to that matrix. Uses :func:`takagi`. Args: A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph max_mean_photon (float): ``graph_embed`` guarantees that the mean photon number in the pure Gaussian state representing the graph satisfies :math:`\sum_i sinh(r_{i})^2 ==` ``mean_photon``. make_traceless (bool): Removes the trace of the input matrix, by performing the transformation :math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode the graph. rtol (float): relative tolerance used when checking if the input matrix is symmetric. atol (float): absolute tolerance used when checking if the input matrix is symmetric. 
Returns: tuple[array, array]: squeezing parameters of the input state to the interferometer, and the unitary matrix representing the interferometer """""" (m, n) = A.shape if m != n: raise ValueError(""The matrix is not square."") # if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol) # raise ValueError(""The matrix is not symmetric."") if make_traceless: A = A - np.trace(A) * np.identity(n) / n scale = find_scaling_adjacency_matrix(A, mean_photon) A = scale * A s, U = takagi(A, tol=atol) vals = -np.arctanh(s) return vals, U " 50608,"def get_sim_time(units: str = ""step"") -> int: """"""Retrieves the simulation time from the simulator. Args: units: String specifying the units of the result (one of ``None``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``). ``None`` will return the raw simulation time. Returns: The simulation time in the specified units. .. versionchanged:: 1.6.0 Support ``'step'`` as the the *units* argument to mean ""simulator time step"". """""" timeh, timel = simulator.get_sim_time() result = (timeh << 32 | timel) if units not in (None, ""step""): result = get_time_from_sim_steps(result, units) if units is None: warnings.warn( 'Using units=None is deprecated, use units=""step"" instead.', DeprecationWarning, stacklevel=2) return result ","def get_sim_time(units: str = ""step"") -> int: """"""Retrieves the simulation time from the simulator. Args: units: String specifying the units of the result (one of ``'step'``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``). ``'step'`` will return the raw simulation time. .. deprecated:: 1.6.0 Using ``None`` as the *units* argument is deprecated, use ``'step'`` instead. Returns: The simulation time in the specified units. .. versionchanged:: 1.6.0 Support ``'step'`` as the the *units* argument to mean ""simulator time step"". """""" timeh, timel = simulator.get_sim_time() result = (timeh << 32 | timel) if units not in (None, ""step""): result = get_time_from_sim_steps(result, units) if units is None: warnings.warn( 'Using units=None is deprecated, use units=""step"" instead.', DeprecationWarning, stacklevel=2) return result " 38901,"def field_schema( field: Field, *, by_alias: bool = True, model_name_map: Dict[Type['BaseModel'], str], ref_prefix: Optional[str] = None, known_models: Set[Type['BaseModel']] = None, ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """""" Process a Pydantic field and return a tuple with a JSON Schema for it as the first item. Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they will be included in the definitions and referenced in the schema instead of included recursively. 
:param field: a Pydantic ``Field`` :param by_alias: use the defined alias (if any) in the returned schema :param model_name_map: used to generate the JSON Schema references to other models included in the definitions :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of #/definitions/ will be used :param known_models: used to solve circular references :return: tuple of the schema for this field and additional definitions """""" ref_prefix = ref_prefix or default_prefix known_models = known_models or set() schema_overrides = False schema = cast('Schema', field.schema) s = dict(title=schema.title or field.alias.title()) if schema.title: schema_overrides = True if schema.description: s['description'] = schema.description schema_overrides = True if not field.required and not (field.schema is not None and field.schema.const) and field.default is not None: s['default'] = encode_default(field.default) schema_overrides = True validation_schema = get_field_schema_validations(field) if validation_schema: s.update(validation_schema) schema_overrides = True f_schema, f_definitions = field_type_schema( field, by_alias=by_alias, model_name_map=model_name_map, schema_overrides=schema_overrides, ref_prefix=ref_prefix, known_models=known_models, ) # $ref will only be returned when there are no schema_overrides if '$ref' in f_schema: return f_schema, f_definitions else: s.update(f_schema) return s, f_definitions ","def field_schema( field: Field, *, by_alias: bool = True, model_name_map: Dict[Type['BaseModel'], str], ref_prefix: Optional[str] = None, known_models: Set[Type['BaseModel']] = None, ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """""" Process a Pydantic field and return a tuple with a JSON Schema for it as the first item. Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they will be included in the definitions and referenced in the schema instead of included recursively. 
:param field: a Pydantic ``Field`` :param by_alias: use the defined alias (if any) in the returned schema :param model_name_map: used to generate the JSON Schema references to other models included in the definitions :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of #/definitions/ will be used :param known_models: used to solve circular references :return: tuple of the schema for this field and additional definitions """""" ref_prefix = ref_prefix or default_prefix known_models = known_models or set() schema_overrides = False schema = cast('Schema', field.schema) s = dict(title=schema.title or field.alias.title()) if schema.title: schema_overrides = True if schema.description: s['description'] = schema.description schema_overrides = True if not field.required and not (field.schema is not None and field.schema.const) and field.default is not None: s['default'] = encode_default(field.default) schema_overrides = True validation_schema = get_field_schema_validations(field) if validation_schema: s.update(validation_schema) schema_overrides = True f_schema, f_definitions = field_type_schema( field, by_alias=by_alias, model_name_map=model_name_map, schema_overrides=schema_overrides, ref_prefix=ref_prefix, known_models=known_models or set(), ) # $ref will only be returned when there are no schema_overrides if '$ref' in f_schema: return f_schema, f_definitions else: s.update(f_schema) return s, f_definitions " 3946,"def prepare_rookout_token() -> None: """"""Load rookout token into memory while we are still root"""""" global ROOKOUT_TOKEN try: with open(ROOKOUT_TOKEN_PATH, encoding=""utf-8"") as _rookout_token_file: ROOKOUT_TOKEN = _rookout_token_file.read().strip() except OSError as exc: log.warning(""Failed to load rookout token: %s"", exc) ","def prepare_rookout_token() -> None: """"""Load rookout token into memory while we are still root"""""" global ROOKOUT_TOKEN try: with open(ROOKOUT_TOKEN_PATH, encoding=""utf-8"") as _rookout_token_file: ROOKOUT_TOKEN = _rookout_token_file.read().strip() except OSError as exc: log.exception(""Failed to load rookout token"") " 43850,"def IsingYY(phi): r""""""Ising YY coupling gate .. math:: YY(\phi) = \begin{bmatrix} \cos(\phi / 2) & 0 & 0 & i \sin(\phi / 2) \\ 0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\ 0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\ i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2) \end{bmatrix}. Args: phi (float): rotation angle :math:`\phi` Returns: array[complex]: unitary 4x4 rotation matrix """""" return np.cos(phi / 2) * II - 1j * np.sin(phi / 2) * YY ","def IsingYY(phi): r""""""Ising YY coupling gate. .. math:: YY(\phi) = \begin{bmatrix} \cos(\phi / 2) & 0 & 0 & i \sin(\phi / 2) \\ 0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\ 0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\ i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2) \end{bmatrix}. Args: phi (float): rotation angle :math:`\phi` Returns: array[complex]: unitary 4x4 rotation matrix """""" return np.cos(phi / 2) * II - 1j * np.sin(phi / 2) * YY " 7871,"def check_type(name, value, expected_type, expected_iter_type=None, *, none_ok=False): """"""Ensure that an object is of an expected type. Optionally, if the object is iterable, check that each element is of a particular type. 
Parameters ---------- name : str Description of value being checked value : object Object to check type of expected_type : type or Iterable of type type to check object against expected_iter_type : type or Iterable of type or None, optional Expected type of each element in value, assuming it is iterable. If None, no check will be performed. none_ok : bool Whether None is allowed as a value """""" if none_ok and value is None: return if not isinstance(value, expected_type): if isinstance(expected_type, Iterable): msg = 'Unable to set ""{}"" to ""{}"" which is not one of the ' \ 'following types: ""{}""'.format(name, value, ', '.join( [t.__name__ for t in expected_type])) else: msg = 'Unable to set ""{}"" to ""{}"" which is not of type ""{}""'.format( name, value, expected_type.__name__) raise TypeError(msg) if expected_iter_type: if isinstance(value, np.ndarray): if not issubclass(value.dtype.type, expected_iter_type): msg = 'Unable to set ""{}"" to ""{}"" since each item must be ' \ 'of type ""{}""'.format(name, value, expected_iter_type.__name__) raise TypeError(msg) else: return for item in value: if not isinstance(item, expected_iter_type): if isinstance(expected_iter_type, Iterable): msg = 'Unable to set ""{}"" to ""{}"" since each item must be ' \ 'one of the following types: ""{}""'.format( name, value, ', '.join([t.__name__ for t in expected_iter_type])) else: msg = 'Unable to set ""{}"" to ""{}"" since each item must be ' \ 'of type ""{}""'.format(name, value, expected_iter_type.__name__) raise TypeError(msg) ","def check_type(name, value, expected_type, expected_iter_type=None, *, none_ok=False): """"""Ensure that an object is of an expected type. Optionally, if the object is iterable, check that each element is of a particular type. Parameters ---------- name : str Description of value being checked value : object Object to check type of expected_type : type or Iterable of type type to check object against expected_iter_type : type or Iterable of type or None, optional Expected type of each element in value, assuming it is iterable. If None, no check will be performed. 
none_ok : bool, optional Whether None is allowed as a value """""" if none_ok and value is None: return if not isinstance(value, expected_type): if isinstance(expected_type, Iterable): msg = 'Unable to set ""{}"" to ""{}"" which is not one of the ' \ 'following types: ""{}""'.format(name, value, ', '.join( [t.__name__ for t in expected_type])) else: msg = 'Unable to set ""{}"" to ""{}"" which is not of type ""{}""'.format( name, value, expected_type.__name__) raise TypeError(msg) if expected_iter_type: if isinstance(value, np.ndarray): if not issubclass(value.dtype.type, expected_iter_type): msg = 'Unable to set ""{}"" to ""{}"" since each item must be ' \ 'of type ""{}""'.format(name, value, expected_iter_type.__name__) raise TypeError(msg) else: return for item in value: if not isinstance(item, expected_iter_type): if isinstance(expected_iter_type, Iterable): msg = 'Unable to set ""{}"" to ""{}"" since each item must be ' \ 'one of the following types: ""{}""'.format( name, value, ', '.join([t.__name__ for t in expected_iter_type])) else: msg = 'Unable to set ""{}"" to ""{}"" since each item must be ' \ 'of type ""{}""'.format(name, value, expected_iter_type.__name__) raise TypeError(msg) " 17204,"def _build_base_url( entry: ConfigEntry, ) -> str: """"""Build base url for System Bridge media."""""" return ( f""http://{entry.data[CONF_HOST]}:{entry.data[CONF_PORT]}"" + f""/api/media/file/data?apiKey={entry.data[CONF_API_KEY]}"" ) ","def _build_base_url( entry: ConfigEntry, ) -> str: """"""Build base url for System Bridge media."""""" return ( f""http://{entry.data[CONF_HOST]}:{entry.data[CONF_PORT]}"" f""/api/media/file/data?apiKey={entry.data[CONF_API_KEY]}"" ) " 5617,"def jaccard(u, v, w=None): """""" Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays. The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`, is defined as .. math:: \\frac{c_{TF} + c_{FT}} {c_{TT} + c_{FT} + c_{TF}} where :math:`c_{ij}` is the number of occurrences of :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for :math:`k < n`. Parameters ---------- u : (N,) array_like, bool Input array. v : (N,) array_like, bool Input array. w : (N,) array_like, optional The weights for each value in `u` and `v`. Default is None, which gives each value a weight of 1.0 Returns ------- jaccard : double The Jaccard distance between vectors `u` and `v`. Notes ----- When both `u` and `v` lead to a `0/0` divsion i.e. there is no overlap between the items in the vectors the returned distance is 0. See the Wikipedia page on the Jaccard index [1]_, and this paper [2]_. References ---------- .. [1] https://en.wikipedia.org/wiki/Jaccard_index .. [2] S. Kosub, ""A note on the triangel inequalty for the Jaccard dstance"", 2016, Available online: https://arxiv.org/pdf/1612.02696.pdf Examples -------- >>> from scipy.spatial import distance >>> distance.jaccard([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.jaccard([1, 0, 0], [1, 1, 0]) 0.5 >>> distance.jaccard([1, 0, 0], [1, 2, 0]) 0.5 >>> distance.jaccard([1, 0, 0], [1, 1, 1]) 0.66666666666666663 """""" u = _validate_vector(u) v = _validate_vector(v) nonzero = np.bitwise_or(u != 0, v != 0) unequal_nonzero = np.bitwise_and((u != v), nonzero) if w is not None: w = _validate_weights(w) nonzero = w * nonzero unequal_nonzero = w * unequal_nonzero a = np.double(unequal_nonzero.sum()) b = np.double(nonzero.sum()) return (a / b) if b != 0 else 0 ","def jaccard(u, v, w=None): """""" Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays. 
The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`, is defined as .. math:: \\frac{c_{TF} + c_{FT}} {c_{TT} + c_{FT} + c_{TF}} where :math:`c_{ij}` is the number of occurrences of :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for :math:`k < n`. Parameters ---------- u : (N,) array_like, bool Input array. v : (N,) array_like, bool Input array. w : (N,) array_like, optional The weights for each value in `u` and `v`. Default is None, which gives each value a weight of 1.0 Returns ------- jaccard : double The Jaccard distance between vectors `u` and `v`. Notes ----- When both `u` and `v` lead to a `0/0` divsion i.e. there is no overlap between the items in the vectors the returned distance is 0. See the Wikipedia page on the Jaccard index [1]_, and this paper [2]_. References ---------- .. [1] https://en.wikipedia.org/wiki/Jaccard_index .. [2] S. Kosub, ""A note on the triangel inequalty for the Jaccard distance"", 2016, Available online: https://arxiv.org/pdf/1612.02696.pdf Examples -------- >>> from scipy.spatial import distance >>> distance.jaccard([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.jaccard([1, 0, 0], [1, 1, 0]) 0.5 >>> distance.jaccard([1, 0, 0], [1, 2, 0]) 0.5 >>> distance.jaccard([1, 0, 0], [1, 1, 1]) 0.66666666666666663 """""" u = _validate_vector(u) v = _validate_vector(v) nonzero = np.bitwise_or(u != 0, v != 0) unequal_nonzero = np.bitwise_and((u != v), nonzero) if w is not None: w = _validate_weights(w) nonzero = w * nonzero unequal_nonzero = w * unequal_nonzero a = np.double(unequal_nonzero.sum()) b = np.double(nonzero.sum()) return (a / b) if b != 0 else 0 " 46577,"def invalid_cases(): rng = Random(1234) for (name, (typ, offsets)) in PRESET_CONTAINERS.items(): # using mode_max_count, so that the extra byte cannot be picked up as normal list content yield f'{name}_extra_byte', \ invalid_test_case(lambda: serialize( container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff') if len(offsets) != 0: # Note: there are many more ways to have invalid offsets, # these are just example to get clients started looking into hardening ssz. 
for mode in [RandomizationMode.mode_random, RandomizationMode.mode_nil_count, RandomizationMode.mode_one_count, RandomizationMode.mode_max_count]: if len(offsets) != 0: for index, offset_index in enumerate(offsets): yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x + 1 )) yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: 0 )) if index == 0: yield f'{name}_{mode.to_name()}_first offset_{offset_index}_minus_one', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x - 1 )) if mode == RandomizationMode.mode_max_count: serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[0:2] yield f'{name}_{mode.to_name()}_last offset_{offset_index}_overflow', \ invalid_test_case(lambda: serialized) if mode == RandomizationMode.mode_one_count: serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[0:1] yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \ invalid_test_case(lambda: serialized) ","def invalid_cases(): rng = Random(1234) for (name, (typ, offsets)) in PRESET_CONTAINERS.items(): # using mode_max_count, so that the extra byte cannot be picked up as normal list content yield f'{name}_extra_byte', \ invalid_test_case(lambda: serialize( container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff') if len(offsets) != 0: # Note: there are many more ways to have invalid offsets, # these are just example to get clients started looking into hardening ssz. for mode in [RandomizationMode.mode_random, RandomizationMode.mode_nil_count, RandomizationMode.mode_one_count, RandomizationMode.mode_max_count]: if len(offsets) != 0: for index, offset_index in enumerate(offsets): yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x + 1 )) yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: 0 )) if index == 0: yield f'{name}_{mode.to_name()}_first offset_{offset_index}_minus_one', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x - 1 )) if mode == RandomizationMode.mode_max_count: serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[0:2] yield f'{name}_{mode.to_name()}_last offset_{offset_index}_overflow', \ invalid_test_case(lambda: serialized) if mode == RandomizationMode.mode_one_count: serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[:1] yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \ invalid_test_case(lambda: serialized) " 12468,"def get_nongen_builtins(python_version): # After 3.9 with pep585 generic builtins are allowed. return _nongen_builtins if python_version < (3, 9) else {} ","def get_nongen_builtins(python_version: Tuple[int, int]) -> Dict[str, str]: # After 3.9 with pep585 generic builtins are allowed. 
return _nongen_builtins if python_version < (3, 9) else {} " 22369,"def print_folders(pad, folder): # For debugging... pad_str = '' for _i in range(1, pad): pad_str += ' ' print(f'{pad_str} id: {folder.id} key: {folder.key}') for repository_dependency in folder.repository_dependencies: print(f' {pad_str}{repository_dependency.listify}') for sub_folder in folder.folders: print_folders(pad + 5, sub_folder) ","def print_folders(pad, folder): # For debugging... pad_str = '' for _i in range(1, pad): pad_str += ' ' print(f'{pad_str}id: {folder.id} key: {folder.key}') for repository_dependency in folder.repository_dependencies: print(f' {pad_str}{repository_dependency.listify}') for sub_folder in folder.folders: print_folders(pad + 5, sub_folder) " 44598,"def main(): parser = argparse.ArgumentParser(description="""") group = parser.add_mutually_exclusive_group() group.add_argument(""--yara"", help=""Yara rule to use for query"") group.add_argument(""--job"", help=""Job ID to print or download"") parser.add_argument( ""--mquery"", default=""http://localhost"", help=""Change mquery server address"", ) parser.add_argument( ""--print-filename"", default=False, action=""store_true"", help=""Also print filenames"", ) parser.add_argument( ""--print-matches"", default=False, action=""store_true"", help=""Also print matched rules"", ) parser.add_argument( ""--save"", default=None, help=""Download samples and save to the provided directory"", ) args = parser.parse_args() output = OutputSettings() output.print_filename = args.print_filename output.print_matches = args.print_matches output.save_to_directory = args.save if args.save is not None: os.makedirs(args.save, exist_ok=True) if args.yara: with open(args.yara, ""r"") as f: yara_rule = f.read() job_id = query(args.mquery, yara_rule) else: job_id = args.job process_job(args.mquery, job_id, output) ","def main(): parser = argparse.ArgumentParser(description="""") group = parser.add_mutually_exclusive_group() group.add_argument(""--yara"", help=""Yara rule to use for query"") group.add_argument(""--job"", help=""Job ID to print or download"") parser.add_argument( ""--mquery"", default=""http://localhost"", help=""Change mquery server address"", ) parser.add_argument( ""--print-filename"", default=False, action=""store_true"", help=""Also print filenames"", ) parser.add_argument( ""--print-matches"", default=False, action=""store_true"", help=""Also print matched rules"", ) parser.add_argument( ""--save"", default=None, help=""Print matched rules"", ) args = parser.parse_args() output = OutputSettings() output.print_filename = args.print_filename output.print_matches = args.print_matches output.save_to_directory = args.save if args.save is not None: os.makedirs(args.save, exist_ok=True) if args.yara: with open(args.yara, ""r"") as f: yara_rule = f.read() job_id = query(args.mquery, yara_rule) else: job_id = args.job process_job(args.mquery, job_id, output) " 38255,"def check_and_apply_update(): check_releases() if not args.release_update: gitconfig() branch = settings.general.branch g = git.cmd.Git(current_working_directory) g.fetch('origin') result = g.diff('--shortstat', 'origin/' + branch) if len(result) == 0: notifications.write(msg='BAZARR No new version of Bazarr available.', queue='check_update') logging.info('BAZARR No new version of Bazarr available.') else: g.reset('--hard', 'HEAD') g.checkout(branch) g.reset('--hard', 'origin/' + branch) g.pull() logging.info('BAZARR Updated to latest version. Restart required. 
' + result) updated() else: url = 'https://api.github.com/repos/morpheus65535/bazarr/releases' releases = request_json(url, timeout=20, whitelist_status_code=404, validator=lambda x: type(x) == list) if releases is None: notifications.write(msg='BAZARR Could not get releases from GitHub.', queue='check_update', type='warning') logging.warn('BAZARR Could not get releases from GitHub.') return else: release = releases[0] latest_release = release['tag_name'] if ('v' + os.environ[""BAZARR_VERSION""]) != latest_release and settings.general.branch == 'master': update_from_source() elif settings.general.branch != 'master': notifications.write(msg=""BAZARR Can't update development branch from source"", queue='check_update') # fixme logging.info(""BAZARR Can't update development branch from source"") # fixme else: notifications.write(msg='BAZARR is up to date', queue='check_update') logging.info('BAZARR is up to date') ","def check_and_apply_update(): check_releases() if not args.release_update: gitconfig() branch = settings.general.branch g = git.cmd.Git(current_working_directory) g.fetch('origin') result = g.diff('--shortstat', 'origin/' + branch) if len(result) == 0: notifications.write(msg='No new version of Bazarr available.', queue='check_update') logging.info('BAZARR No new version of Bazarr available.') else: g.reset('--hard', 'HEAD') g.checkout(branch) g.reset('--hard', 'origin/' + branch) g.pull() logging.info('BAZARR Updated to latest version. Restart required. ' + result) updated() else: url = 'https://api.github.com/repos/morpheus65535/bazarr/releases' releases = request_json(url, timeout=20, whitelist_status_code=404, validator=lambda x: type(x) == list) if releases is None: notifications.write(msg='BAZARR Could not get releases from GitHub.', queue='check_update', type='warning') logging.warn('BAZARR Could not get releases from GitHub.') return else: release = releases[0] latest_release = release['tag_name'] if ('v' + os.environ[""BAZARR_VERSION""]) != latest_release and settings.general.branch == 'master': update_from_source() elif settings.general.branch != 'master': notifications.write(msg=""BAZARR Can't update development branch from source"", queue='check_update') # fixme logging.info(""BAZARR Can't update development branch from source"") # fixme else: notifications.write(msg='BAZARR is up to date', queue='check_update') logging.info('BAZARR is up to date') " 44708,"def _make_divisible(v, divisor, min_value=None): """""" 이 함수는 원본 ts repo에서 가져왔습니다. 모든 계층들이 8로 나뉠수 있는 채널 숫자를 가지는 것을 보장합니다. 다음에서 확인할 수 있습니다: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: """""" if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # 내림은 10% 이상으로 내려가지 않는 것을 보장합니다. if new_v < 0.9 * v: new_v += divisor return new_v ","def _make_divisible(v, divisor, min_value=None): """""" 이 함수는 원본 Tensorflow 저장소에서 가져왔습니다. 모든 계층들이 8로 나뉠수 있는 채널 숫자를 가지는 것을 보장합니다. 다음에서 확인할 수 있습니다: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: """""" if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # 내림은 10% 이상으로 내려가지 않는 것을 보장합니다. 
if new_v < 0.9 * v: new_v += divisor return new_v " 47924,"def build_argparser(): parser = ArgumentParser(add_help=False) args = parser.add_argument_group('Options') args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.') args.add_argument(""-m_encoder"", help=""Required. Path to an .xml file with a trained encoder part of the model"", required=True, type=str) args.add_argument(""-m_decoder"", help=""Required. Path to an .xml file with a trained decoder part of the model"", required=True, type=str) args.add_argument(""--interactive"", help=""Optional. Enables interactive mode. In this mode images are read from the web-camera."", action='store_true', default=False) args.add_argument(""-i"", ""--input"", help=""Optional. Path to a folder with images or path to an image files"", required=False, type=str) args.add_argument(""-o"", ""--output_file"", help=""Optional. Path to file where to store output. If not mentioned, result will be stored"" ""in the console."", type=str) args.add_argument(""--vocab_path"", help=""Required. Path to vocab file to construct meaningful phrase"", type=str, required=True) args.add_argument(""--max_formula_len"", help=""Optional. Defines maximum length of the formula (number of tokens to decode)"", default=""128"", type=int) args.add_argument(""--conf_thresh"", help=""Optional. Probability threshold to trat model prediction as meaningful"", default=CONFIDENCE_THRESH, type=float) args.add_argument(""-d"", ""--device"", help=""Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "" ""acceptable. Sample will look for a suitable plugin for device specified. Default value is CPU"", default=""CPU"", type=str) args.add_argument(""--camera_device"", default=0, type=int, help='Optional. Device id of the web-camera. Change it only if you have more then one camera') args.add_argument(""--resolution"", default=DEFAULT_RESOLUTION, type=int, nargs=2, help=f'Optional. Resolution of the demo application window. Default: {DEFAULT_RESOLUTION}') args.add_argument('--preprocessing_type', choices=PREPROCESSING.keys(), help=""Optional. Type of the preprocessing"", default='crop') args.add_argument('-pc', '--perf_counts', action='store_true', default=False) args.add_argument('--imgs_layer', help='Optional. Encoder input name for images. See README for details.', default='imgs') args.add_argument('--row_enc_out_layer', help='Optional. Encoder output key for row_enc_out. See README for details.', default='row_enc_out') args.add_argument('--hidden_layer', help='Optional. Encoder output key for hidden. See README for details.', default='hidden') args.add_argument('--context_layer', help='Optional. Encoder output key for context. See README for details.', default='context') args.add_argument('--init_0_layer', help='Optional. Encoder output key for init_0. See README for details.', default='init_0') args.add_argument('--dec_st_c_layer', help='Optional. Decoder input key for dec_st_c. See README for details.', default='dec_st_c') args.add_argument('--dec_st_h_layer', help='Optional. Decoder input key for dec_st_h. See README for details.', default='dec_st_h') args.add_argument('--dec_st_c_t_layer', help='Optional. Decoder output key for dec_st_c_t. See README for details.', default='dec_st_c_t') args.add_argument('--dec_st_h_t_layer', help='Optional. Decoder output key for dec_st_h_t. See README for details.', default='dec_st_h_t') args.add_argument('--output_layer', help='Optional. Decoder output key for output. 
See README for details.', default='output') args.add_argument('--output_prev_layer', help='Optional. Decoder input key for output_prev. See README for details.', default='output_prev') args.add_argument('--logit_layer', help='Optional. Decoder output key for logit. See README for details.', default='logit') args.add_argument('--tgt_layer', help='Optional. Decoder input key for tgt. See README for details.', default='tgt') return parser ","def build_argparser(): parser = ArgumentParser(add_help=False) args = parser.add_argument_group('Options') args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.') args.add_argument(""-m_encoder"", help=""Required. Path to an .xml file with a trained encoder part of the model"", required=True, type=str) args.add_argument(""-m_decoder"", help=""Required. Path to an .xml file with a trained decoder part of the model"", required=True, type=str) args.add_argument(""--interactive"", help=""Optional. Enables interactive mode. In this mode images are read from the web-camera."", action='store_true', default=False) args.add_argument(""-i"", ""--input"", help=""Optional. Path to a folder with images or path to an image files"", required=False, type=str) args.add_argument(""-o"", ""--output_file"", help=""Optional. Path to file where to store output. If not mentioned, result will be stored"" ""in the console."", type=str) args.add_argument(""--vocab_path"", help=""Required. Path to vocab file to construct meaningful phrase"", type=str, required=True) args.add_argument(""--max_formula_len"", help=""Optional. Defines maximum length of the formula (number of tokens to decode)"", default=""128"", type=int) args.add_argument(""--conf_thresh"", help=""Optional. Probability threshold to treat model prediction as meaningful"", default=CONFIDENCE_THRESH, type=float) args.add_argument(""-d"", ""--device"", help=""Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "" ""acceptable. Sample will look for a suitable plugin for device specified. Default value is CPU"", default=""CPU"", type=str) args.add_argument(""--camera_device"", default=0, type=int, help='Optional. Device id of the web-camera. Change it only if you have more then one camera') args.add_argument(""--resolution"", default=DEFAULT_RESOLUTION, type=int, nargs=2, help=f'Optional. Resolution of the demo application window. Default: {DEFAULT_RESOLUTION}') args.add_argument('--preprocessing_type', choices=PREPROCESSING.keys(), help=""Optional. Type of the preprocessing"", default='crop') args.add_argument('-pc', '--perf_counts', action='store_true', default=False) args.add_argument('--imgs_layer', help='Optional. Encoder input name for images. See README for details.', default='imgs') args.add_argument('--row_enc_out_layer', help='Optional. Encoder output key for row_enc_out. See README for details.', default='row_enc_out') args.add_argument('--hidden_layer', help='Optional. Encoder output key for hidden. See README for details.', default='hidden') args.add_argument('--context_layer', help='Optional. Encoder output key for context. See README for details.', default='context') args.add_argument('--init_0_layer', help='Optional. Encoder output key for init_0. See README for details.', default='init_0') args.add_argument('--dec_st_c_layer', help='Optional. Decoder input key for dec_st_c. See README for details.', default='dec_st_c') args.add_argument('--dec_st_h_layer', help='Optional. Decoder input key for dec_st_h. 
See README for details.', default='dec_st_h') args.add_argument('--dec_st_c_t_layer', help='Optional. Decoder output key for dec_st_c_t. See README for details.', default='dec_st_c_t') args.add_argument('--dec_st_h_t_layer', help='Optional. Decoder output key for dec_st_h_t. See README for details.', default='dec_st_h_t') args.add_argument('--output_layer', help='Optional. Decoder output key for output. See README for details.', default='output') args.add_argument('--output_prev_layer', help='Optional. Decoder input key for output_prev. See README for details.', default='output_prev') args.add_argument('--logit_layer', help='Optional. Decoder output key for logit. See README for details.', default='logit') args.add_argument('--tgt_layer', help='Optional. Decoder input key for tgt. See README for details.', default='tgt') return parser " 2683,"def test_partial_dependence_display_deprecation( plot_partial_dependence, pyplot, clf_diabetes, diabetes ): """"""Check that we raise the proper warning in the display."""""" disp = plot_partial_dependence( clf_diabetes, diabetes.data, [0, 2], grid_resolution=25, feature_names=diabetes.feature_names, ) deprecation_msg = ""The `pdp_lim` parameter is deprecated"" overwritting_msg = ( ""`pdp_lim` has been passed in both the constructor and the `plot` method"" ) disp.pdp_lim = None # case when constructor and method parameters are the same with pytest.warns(FutureWarning, match=deprecation_msg): disp.plot(pdp_lim=None) # case when constructor and method parameters are different with warnings.catch_warnings(record=True) as record: warnings.simplefilter(""always"") disp.plot(pdp_lim=(0, 1)) assert len(record) == 2 for warning in record: assert warning.message.args[0].startswith((deprecation_msg, overwritting_msg)) ","def test_partial_dependence_display_deprecation( plot_partial_dependence, pyplot, clf_diabetes, diabetes ): """"""Check that we raise the proper warning in the display."""""" disp = plot_partial_dependence( clf_diabetes, diabetes.data, [0, 2], grid_resolution=25, feature_names=diabetes.feature_names, ) deprecation_msg = ""The `pdp_lim` parameter is deprecated"" overwritting_msg = ( ""`pdp_lim` has been passed in both the constructor and the `plot` method"" ) disp.pdp_lim = None # case when constructor and method parameters are the same with pytest.warns(FutureWarning, match=deprecation_msg): disp.plot(pdp_lim=None) # case when constructor and method parameters are different with warnings.catch_warnings(record=True) as record: warnings.simplefilter(""always"", FutureWarning) disp.plot(pdp_lim=(0, 1)) assert len(record) == 2 for warning in record: assert warning.message.args[0].startswith((deprecation_msg, overwritting_msg)) " 22807,"def run(config: configuration.NamespaceConfig, plugins: plugins_disco.PluginsRegistry) -> Optional[str]: """"""Obtain a certificate and install. 
:param config: Configuration object :type config: configuration.NamespaceConfig :param plugins: List of plugins :type plugins: plugins_disco.PluginsRegistry :returns: `None` :rtype: None """""" # TODO: Make run as close to auth + install as possible # Possible difficulties: config.csr was hacked into auth try: installer, authenticator = plug_sel.choose_configurator_plugins(config, plugins, ""run"") except errors.PluginSelectionError as e: return str(e) if config.must_staple and installer and ""staple-ocsp"" not in installer.supported_enhancements(): raise errors.NotSupportedError(""Must Staple extension requested, but OCSP stapling "" ""is not supported by the selected installer"") # Preflight check for enhancement support by the selected installer if not enhancements.are_supported(config, installer): raise errors.NotSupportedError(""One ore more of the requested enhancements "" ""are not supported by the selected installer"") # TODO: Handle errors from _init_le_client? le_client = _init_le_client(config, authenticator, installer) domains, certname = _find_domains_or_certname(config, installer) should_get_cert, lineage = _find_cert(config, domains, certname) new_lineage = lineage if should_get_cert: new_lineage = _get_and_save_cert(le_client, config, domains, certname, lineage) cert_path = new_lineage.cert_path if new_lineage else None fullchain_path = new_lineage.fullchain_path if new_lineage else None key_path = new_lineage.key_path if new_lineage else None if should_get_cert: _report_new_cert(config, cert_path, fullchain_path, key_path) # The installer error, if any, is being stored as a value here, in order to first print # relevant advice in a nice way, before re-raising the error for normal processing. installer_err: Optional[errors.Error] = None try: _install_cert(config, le_client, domains, new_lineage) if enhancements.are_requested(config) and new_lineage: enhancements.enable(new_lineage, domains, installer, config) if lineage is None or not should_get_cert: display_ops.success_installation(domains) else: display_ops.success_renewal(domains) except errors.Error as e: installer_err = e finally: _report_next_steps(config, installer_err, new_lineage, new_or_renewed_cert=should_get_cert) # If the installer did fail, re-raise the error to bail out if installer_err: raise installer_err _suggest_donation_if_appropriate(config) eff.handle_subscription(config, le_client.account) return None ","def run(config: configuration.NamespaceConfig, plugins: plugins_disco.PluginsRegistry) -> Optional[str]: """"""Obtain a certificate and install. 
:param config: Configuration object :type config: configuration.NamespaceConfig :param plugins: List of plugins :type plugins: plugins_disco.PluginsRegistry :returns: `None` :rtype: None """""" # TODO: Make run as close to auth + install as possible # Possible difficulties: config.csr was hacked into auth try: installer, authenticator = plug_sel.choose_configurator_plugins(config, plugins, ""run"") except errors.PluginSelectionError as e: return str(e) if config.must_staple and installer and ""staple-ocsp"" not in installer.supported_enhancements(): raise errors.NotSupportedError(""Must-Staple extension requested, but OCSP stapling "" ""is not supported by the selected installer"") # Preflight check for enhancement support by the selected installer if not enhancements.are_supported(config, installer): raise errors.NotSupportedError(""One ore more of the requested enhancements "" ""are not supported by the selected installer"") # TODO: Handle errors from _init_le_client? le_client = _init_le_client(config, authenticator, installer) domains, certname = _find_domains_or_certname(config, installer) should_get_cert, lineage = _find_cert(config, domains, certname) new_lineage = lineage if should_get_cert: new_lineage = _get_and_save_cert(le_client, config, domains, certname, lineage) cert_path = new_lineage.cert_path if new_lineage else None fullchain_path = new_lineage.fullchain_path if new_lineage else None key_path = new_lineage.key_path if new_lineage else None if should_get_cert: _report_new_cert(config, cert_path, fullchain_path, key_path) # The installer error, if any, is being stored as a value here, in order to first print # relevant advice in a nice way, before re-raising the error for normal processing. installer_err: Optional[errors.Error] = None try: _install_cert(config, le_client, domains, new_lineage) if enhancements.are_requested(config) and new_lineage: enhancements.enable(new_lineage, domains, installer, config) if lineage is None or not should_get_cert: display_ops.success_installation(domains) else: display_ops.success_renewal(domains) except errors.Error as e: installer_err = e finally: _report_next_steps(config, installer_err, new_lineage, new_or_renewed_cert=should_get_cert) # If the installer did fail, re-raise the error to bail out if installer_err: raise installer_err _suggest_donation_if_appropriate(config) eff.handle_subscription(config, le_client.account) return None " 8080,"def get_body_heliographic_stonyhurst(body, time='now', observer=None): """""" Return a `~sunpy.coordinates.frames.HeliographicStonyhurst` frame for the location of a solar-system body at a specified time. 
Parameters ---------- body : `str` The solar-system body for which to calculate positions time : various Time to use as `~astropy.time.Time` or in a parse_time-compatible format observer : `~astropy.coordinates.SkyCoord` If not None, the returned coordinate is the apparent location (i.e., factors in light travel time) Returns ------- out : `~sunpy.coordinates.frames.HeliographicStonyhurst` Location of the solar-system body in the `~sunpy.coordinates.HeliographicStonyhurst` frame """""" obstime = parse_time(time) if observer is None: body_icrs = get_body_barycentric(body, obstime) else: observer_icrs = SkyCoord(observer).icrs.cartesian # This implementation is modeled after Astropy's `_get_apparent_body_position` light_travel_time = 0.*u.s emitted_time = obstime delta_light_travel_time = 1.*u.s # placeholder value while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s): body_icrs = get_body_barycentric(body, emitted_time) distance = (body_icrs - observer_icrs).norm() delta_light_travel_time = light_travel_time - distance / speed_of_light light_travel_time = distance / speed_of_light emitted_time = obstime - light_travel_time print('Apparent location factors in {} seconds of light travel time'.format(light_travel_time.to('s').value)) body_hgs = ICRS(body_icrs).transform_to(HGS(obstime=obstime)) return body_hgs ","def get_body_heliographic_stonyhurst(body, time='now', observer=None): """""" Return a `~sunpy.coordinates.frames.HeliographicStonyhurst` frame for the location of a solar-system body at a specified time. Parameters ---------- body : `str` The solar-system body for which to calculate positions time : various Time to use as `~astropy.time.Time` or in a parse_time-compatible format observer : `~astropy.coordinates.SkyCoord` If not None, the returned coordinate is the apparent location (i.e., factors in light travel time) Returns ------- out : `~sunpy.coordinates.frames.HeliographicStonyhurst` Location of the solar-system body in the `~sunpy.coordinates.HeliographicStonyhurst` frame """""" obstime = parse_time(time) if observer is None: body_icrs = get_body_barycentric(body, obstime) else: observer_icrs = SkyCoord(observer).icrs.cartesian # This implementation is modeled after Astropy's `_get_apparent_body_position` light_travel_time = 0.*u.s emitted_time = obstime delta_light_travel_time = 1.*u.s # placeholder value while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s): body_icrs = get_body_barycentric(body, emitted_time) distance = (body_icrs - observer_icrs).norm() delta_light_travel_time = light_travel_time - distance / speed_of_light light_travel_time = distance / speed_of_light emitted_time = obstime - light_travel_time print('Apparent location factors in {} seconds of light travel time'.format(light_travel_time.to('s').value)) body_hgs = ICRS(body_icrs).transform_to(HGS(obstime=obstime)) return body_hgs " 5197,"def _split_ttc(ttc_path): """"""SPlit a TTC ont into TTF files"""""" res = _read_ttc(ttc_path) ttf_fonts, table_index, table_data = res out_base = Path( mpl.get_cachedir(), os.path.basename(ttc_path) + ""-"" ) return _dump_ttf(out_base, ttf_fonts, table_index, table_data) ","def _split_ttc(ttc_path): """"""Split a TTC file into TTF files"""""" res = _read_ttc(ttc_path) ttf_fonts, table_index, table_data = res out_base = Path( mpl.get_cachedir(), os.path.basename(ttc_path) + ""-"" ) return _dump_ttf(out_base, ttf_fonts, table_index, table_data) " 25926,"def _configure_knack(): """"""Override consts defined in knack to make them Azure CLI-specific."""""" from 
knack.util import status_tag_messages ref_message = ""Reference and support levels: https://aka.ms/CLI_refstatus"" # Override the preview message status_tag_messages['preview'] = ""{} is in preview and under development. "" + ref_message # Override the experimental message status_tag_messages['experimental'] = ""{} is experimental and under development. "" + ref_message ","def _configure_knack(): """"""Override consts defined in knack to make them Azure CLI-specific."""""" from knack.util import status_tag_messages ref_message = ""Reference and support levels: https://aka.ms/CLI_refstatus"" # Override the preview message status_tag_messages['preview'] = ""{} is in preview. "" + ref_message # Override the experimental message status_tag_messages['experimental'] = ""{} is experimental and under development. "" + ref_message " 392,"def sample_smc( draws=2000, kernel=IMH, *, start=None, model=None, random_seed=None, chains=None, cores=None, compute_convergence_checks=True, return_inferencedata=True, idata_kwargs=None, progressbar=True, **kernel_kwargs, ): r"""""" Sequential Monte Carlo based sampling. Parameters ---------- draws : int, default 2000 The number of samples to draw from the posterior (i.e. last stage). And also the number of independent chains. Defaults to 2000. kernel : class, default `pymc.smc.smc.IMH` SMC_Kernel used. Defaults to :class:`pymc.smc.smc.IMH` (Independent Metropolis Hastings) start : dict, or array of dict, default None Starting point in parameter space. It should be a list of dict with length `chains`. When None (default) the starting point is sampled from the prior distribution. model : Model (optional if in ``with`` context). random_seed : int, array_like of int, RandomState or Generator, optional Random seed(s) used by the sampling steps. If a list, tuple or array of ints is passed, each entry will be used to seed each chain. A ValueError will be raised if the length does not match the number of chains. chains : int, default None The number of chains to sample. Running independent chains is important for some convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever is larger. cores : int, default None The number of chains to run in parallel. If ``None``, set to the number of CPUs in the system. compute_convergence_checks : bool, default True Whether to compute sampler statistics like ``R hat`` and ``effective_n``. Defaults to ``True``. return_inferencedata : bool, default True Whether to return the trace as an InferenceData (True) object or a MultiTrace (False). Defaults to ``True``. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data`. progressbar : bool, optional, default True Whether or not to display a progress bar in the command line. **kernel_kwargs : dict, optional Keyword arguments passed to the SMC_kernel. The default IMH kernel takes the following keywords: threshold : float, default 0.5 Determines the change of beta from stage to stage, i.e. indirectly the number of stages, the higher the value of `threshold` the higher the number of stages. Defaults to 0.5. It should be between 0 and 1. correlation_threshold : float, default 0.01 The lower the value the higher the number of MCMC steps computed automatically. Defaults to 0.01. It should be between 0 and 1. Keyword arguments for other kernels should be checked in the respective docstrings. Notes ----- SMC works by moving through successive stages. 
At each stage the inverse temperature :math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0 we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution. So in more general terms, we are always computing samples from a tempered posterior that we can write as: .. math:: p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta) A summary of the algorithm is: 1. Initialize :math:`\beta` at zero and stage at zero. 2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the tempered posterior is the prior). 3. Increase :math:`\beta` in order to make the effective sample size equal some predefined value (we use :math:`Nt`, where :math:`t` is 0.5 by default). 4. Compute a set of N importance weights W. The weights are computed as the ratio of the likelihoods of a sample at stage i+1 and stage i. 5. Obtain :math:`S_{w}` by re-sampling according to W. 6. Use W to compute the mean and covariance for the proposal distribution, a MvNormal. 7. Run N independent MCMC chains, starting each one from a different sample in :math:`S_{w}`. For the IMH kernel, the mean of the proposal distribution is the mean of the previous posterior stage and not the current point in parameter space. 8. The N chains are run until the autocorrelation with the samples from the previous stage stops decreasing given a certain threshold. 9. Repeat from step 3 until :math:`\beta \ge 1`. 10. The final result is a collection of N samples from the posterior. References ---------- .. [Minson2013] Minson, S. E., Simons, M., and Beck, J. L. (2013). ""Bayesian inversion for finite fault earthquake source models I- Theory and algorithm."" Geophysical Journal International, 2013, 194(3), pp.1701-1726. `link `__ .. [Ching2007] Ching, J., and Chen, Y. (2007). ""Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class Selection, and Model Averaging."" J. Eng. Mech., 2007, 133(7), pp. 816-832. doi:10.1061/(ASCE)0733-9399(2007)133:7(816). `link `__ """""" if isinstance(kernel, str) and kernel.lower() in (""abc"", ""metropolis""): warnings.warn( f'The kernel string argument ""{kernel}"" in sample_smc has been deprecated. ' f""It is no longer needed to distinguish between `abc` and `metropolis`"", FutureWarning, stacklevel=2, ) kernel = IMH if kernel_kwargs.pop(""save_sim_data"", None) is not None: warnings.warn( ""save_sim_data has been deprecated. Use pm.sample_posterior_predictive "" ""to obtain the same type of samples."", FutureWarning, stacklevel=2, ) if kernel_kwargs.pop(""save_log_pseudolikelihood"", None) is not None: warnings.warn( ""save_log_pseudolikelihood has been deprecated. 
This information is "" ""now saved as log_likelihood in models with Simulator distributions."", FutureWarning, stacklevel=2, ) parallel = kernel_kwargs.pop(""parallel"", None) if parallel is not None: warnings.warn( ""The argument parallel is deprecated, use the argument cores instead."", FutureWarning, stacklevel=2, ) if parallel is False: cores = 1 if cores is None: cores = _cpu_count() if chains is None: chains = max(2, cores) else: cores = min(chains, cores) if random_seed == -1: raise FutureWarning( f""random_seed should be a non-negative integer or None, got: {random_seed}"" ""This will raise a ValueError in the Future"" ) random_seed = None if isinstance(random_seed, int) or random_seed is None: rng = np.random.default_rng(seed=random_seed) random_seed = list(rng.integers(2**30, size=chains)) elif isinstance(random_seed, Iterable): if len(random_seed) != chains: raise ValueError(f""Length of seeds ({len(seeds)}) must match number of chains {chains}"") else: raise TypeError(""Invalid value for `random_seed`. Must be tuple, list, int or None"") model = modelcontext(model) _log = logging.getLogger(""pymc"") _log.info(""Initializing SMC sampler..."") _log.info( f""Sampling {chains} chain{'s' if chains > 1 else ''} "" f""in {cores} job{'s' if cores > 1 else ''}"" ) params = ( draws, kernel, start, model, ) t1 = time.time() if cores > 1: results = run_chains_parallel( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores ) else: results = run_chains_sequential( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs ) ( traces, sample_stats, sample_settings, ) = zip(*results) trace = MultiTrace(traces) _t_sampling = time.time() - t1 sample_stats, idata = _save_sample_stats( sample_settings, sample_stats, chains, trace, return_inferencedata, _t_sampling, idata_kwargs, model, ) if compute_convergence_checks: _compute_convergence_checks(idata, draws, model, trace) return idata if return_inferencedata else trace ","def sample_smc( draws=2000, kernel=IMH, *, start=None, model=None, random_seed=None, chains=None, cores=None, compute_convergence_checks=True, return_inferencedata=True, idata_kwargs=None, progressbar=True, **kernel_kwargs, ): r"""""" Sequential Monte Carlo based sampling. Parameters ---------- draws : int, default 2000 The number of samples to draw from the posterior (i.e. last stage). And also the number of independent chains. Defaults to 2000. kernel : class, default `pymc.smc.smc.IMH` SMC_Kernel used. Defaults to :class:`pymc.smc.smc.IMH` (Independent Metropolis Hastings) start : dict, or array of dict, default None Starting point in parameter space. It should be a list of dict with length `chains`. When None (default) the starting point is sampled from the prior distribution. model : Model (optional if in ``with`` context). random_seed : int, array_like of int, RandomState or Generator, optional Random seed(s) used by the sampling steps. If a list, tuple or array of ints is passed, each entry will be used to seed each chain. A ValueError will be raised if the length does not match the number of chains. chains : int, optional The number of chains to sample. Running independent chains is important for some convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever is larger. cores : int, default None The number of chains to run in parallel. If ``None``, set to the number of CPUs in the system. 
compute_convergence_checks : bool, default True Whether to compute sampler statistics like ``R hat`` and ``effective_n``. Defaults to ``True``. return_inferencedata : bool, default True Whether to return the trace as an InferenceData (True) object or a MultiTrace (False). Defaults to ``True``. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data`. progressbar : bool, optional, default True Whether or not to display a progress bar in the command line. **kernel_kwargs : dict, optional Keyword arguments passed to the SMC_kernel. The default IMH kernel takes the following keywords: threshold : float, default 0.5 Determines the change of beta from stage to stage, i.e. indirectly the number of stages, the higher the value of `threshold` the higher the number of stages. Defaults to 0.5. It should be between 0 and 1. correlation_threshold : float, default 0.01 The lower the value the higher the number of MCMC steps computed automatically. Defaults to 0.01. It should be between 0 and 1. Keyword arguments for other kernels should be checked in the respective docstrings. Notes ----- SMC works by moving through successive stages. At each stage the inverse temperature :math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0 we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution. So in more general terms, we are always computing samples from a tempered posterior that we can write as: .. math:: p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta) A summary of the algorithm is: 1. Initialize :math:`\beta` at zero and stage at zero. 2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the tempered posterior is the prior). 3. Increase :math:`\beta` in order to make the effective sample size equal some predefined value (we use :math:`Nt`, where :math:`t` is 0.5 by default). 4. Compute a set of N importance weights W. The weights are computed as the ratio of the likelihoods of a sample at stage i+1 and stage i. 5. Obtain :math:`S_{w}` by re-sampling according to W. 6. Use W to compute the mean and covariance for the proposal distribution, a MvNormal. 7. Run N independent MCMC chains, starting each one from a different sample in :math:`S_{w}`. For the IMH kernel, the mean of the proposal distribution is the mean of the previous posterior stage and not the current point in parameter space. 8. The N chains are run until the autocorrelation with the samples from the previous stage stops decreasing given a certain threshold. 9. Repeat from step 3 until :math:`\beta \ge 1`. 10. The final result is a collection of N samples from the posterior. References ---------- .. [Minson2013] Minson, S. E., Simons, M., and Beck, J. L. (2013). ""Bayesian inversion for finite fault earthquake source models I- Theory and algorithm."" Geophysical Journal International, 2013, 194(3), pp.1701-1726. `link `__ .. [Ching2007] Ching, J., and Chen, Y. (2007). ""Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class Selection, and Model Averaging."" J. Eng. Mech., 2007, 133(7), pp. 816-832. doi:10.1061/(ASCE)0733-9399(2007)133:7(816). `link `__ """""" if isinstance(kernel, str) and kernel.lower() in (""abc"", ""metropolis""): warnings.warn( f'The kernel string argument ""{kernel}"" in sample_smc has been deprecated. 
' f""It is no longer needed to distinguish between `abc` and `metropolis`"", FutureWarning, stacklevel=2, ) kernel = IMH if kernel_kwargs.pop(""save_sim_data"", None) is not None: warnings.warn( ""save_sim_data has been deprecated. Use pm.sample_posterior_predictive "" ""to obtain the same type of samples."", FutureWarning, stacklevel=2, ) if kernel_kwargs.pop(""save_log_pseudolikelihood"", None) is not None: warnings.warn( ""save_log_pseudolikelihood has been deprecated. This information is "" ""now saved as log_likelihood in models with Simulator distributions."", FutureWarning, stacklevel=2, ) parallel = kernel_kwargs.pop(""parallel"", None) if parallel is not None: warnings.warn( ""The argument parallel is deprecated, use the argument cores instead."", FutureWarning, stacklevel=2, ) if parallel is False: cores = 1 if cores is None: cores = _cpu_count() if chains is None: chains = max(2, cores) else: cores = min(chains, cores) if random_seed == -1: raise FutureWarning( f""random_seed should be a non-negative integer or None, got: {random_seed}"" ""This will raise a ValueError in the Future"" ) random_seed = None if isinstance(random_seed, int) or random_seed is None: rng = np.random.default_rng(seed=random_seed) random_seed = list(rng.integers(2**30, size=chains)) elif isinstance(random_seed, Iterable): if len(random_seed) != chains: raise ValueError(f""Length of seeds ({len(seeds)}) must match number of chains {chains}"") else: raise TypeError(""Invalid value for `random_seed`. Must be tuple, list, int or None"") model = modelcontext(model) _log = logging.getLogger(""pymc"") _log.info(""Initializing SMC sampler..."") _log.info( f""Sampling {chains} chain{'s' if chains > 1 else ''} "" f""in {cores} job{'s' if cores > 1 else ''}"" ) params = ( draws, kernel, start, model, ) t1 = time.time() if cores > 1: results = run_chains_parallel( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores ) else: results = run_chains_sequential( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs ) ( traces, sample_stats, sample_settings, ) = zip(*results) trace = MultiTrace(traces) _t_sampling = time.time() - t1 sample_stats, idata = _save_sample_stats( sample_settings, sample_stats, chains, trace, return_inferencedata, _t_sampling, idata_kwargs, model, ) if compute_convergence_checks: _compute_convergence_checks(idata, draws, model, trace) return idata if return_inferencedata else trace " 38516,"def star_shape_cell_centers(g: ""pp.Grid"", as_nan: bool = False) -> np.ndarray: """""" For a given grid compute the star shape center for each cell. The algorithm computes the half space intersections, by using the above method half_space_pt, of the spaces defined by the cell faces and the face normals. This is a wrapper method that operate on a grid. Parameters ---------- g: pp.Grid the grid as_nan: bool, optional Decide whether, in case some cells are not star-shaped return nan as new center. Otherwise an exception is raised (default behaviour). Returns ------- np.ndarray The new cell centers. 
"""""" # no need for 1d or 0d grids if g.dim < 2: return g.cell_centers # retrieve the faces and nodes faces, _, sgn = sps.find(g.cell_faces) nodes, _, _ = sps.find(g.face_nodes) # shift the nodes close to the origin, to avoid numerical problems when coordinates are # too big xn = g.nodes.copy() xn_shift = np.average(xn, axis=1) xn -= np.tile(xn_shift, (xn.shape[1], 1)).T # compute the star shape cell centers by constructing the half spaces of each cell # given by its faces and related normals cell_centers = np.zeros((3, g.num_cells)) for c in np.arange(g.num_cells): loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1]) faces_loc = faces[loc] loc_n = g.face_nodes.indptr[faces_loc] # make the normals coherent normal = np.multiply( sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc]) ) x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]] coords = np.concatenate((x0, x1), axis=1) # compute a point in the half space intersection of all cell faces try: cell_centers[:, c] = pp.half_space.half_space_interior_point( normal, (x1 + x0) / 2.0, coords ) except ValueError: # the cell is not star-shaped if as_nan: cell_centers[:, c] = np.array([np.nan, np.nan, np.nan]) else: raise ValueError( ""Cell not star-shaped impossible to compute the centre"" ) # shift back the computed cell centers and return them return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T ","def star_shape_cell_centers(g: ""pp.Grid"", as_nan: bool = False) -> np.ndarray: """""" For a given grid compute the star shape center for each cell. The algorithm computes the half space intersections, by using the above method half_space_pt, of the spaces defined by the cell faces and the face normals. This is a wrapper method that operate on a grid. Parameters ---------- g: pp.Grid the grid as_nan: bool, optional Decide whether, in case some cells are not star-shaped return nan as new center. Otherwise an exception is raised (default behaviour). Returns ------- np.ndarray The new cell centers. 
"""""" # no need for 1d or 0d grids if g.dim < 2: return g.cell_centers # retrieve the faces and nodes faces, _, sgn = sps.find(g.cell_faces) nodes, _, _ = sps.find(g.face_nodes) # Shift the nodes close to the origin to avoid numerical problems when coordinates are # too big xn = g.nodes.copy() xn_shift = np.average(xn, axis=1) xn -= np.tile(xn_shift, (xn.shape[1], 1)).T # compute the star shape cell centers by constructing the half spaces of each cell # given by its faces and related normals cell_centers = np.zeros((3, g.num_cells)) for c in np.arange(g.num_cells): loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1]) faces_loc = faces[loc] loc_n = g.face_nodes.indptr[faces_loc] # make the normals coherent normal = np.multiply( sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc]) ) x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]] coords = np.concatenate((x0, x1), axis=1) # compute a point in the half space intersection of all cell faces try: cell_centers[:, c] = pp.half_space.half_space_interior_point( normal, (x1 + x0) / 2.0, coords ) except ValueError: # the cell is not star-shaped if as_nan: cell_centers[:, c] = np.array([np.nan, np.nan, np.nan]) else: raise ValueError( ""Cell not star-shaped impossible to compute the centre"" ) # shift back the computed cell centers and return them return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T " 54194,"def _get_parser(): """""" Parses command line inputs for tedana Returns ------- parser.parse_args() : argparse dict """""" parser = argparse.ArgumentParser() # Argument parser follow templtate provided by RalphyZ # https://stackoverflow.com/a/43456577 optional = parser._action_groups.pop() required = parser.add_argument_group('required arguments') required.add_argument('-d', dest='data', nargs='+', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=('Multi-echo dataset for analysis. May be a ' 'single file with spatially concatenated data ' 'or a set of echo-specific files, in the same ' 'order as the TEs are listed in the -e ' 'argument.'), required=True) required.add_argument('-e', dest='tes', nargs='+', metavar='TE', type=float, help='Echo times (in ms). E.g., 15.0 39.0 63.0', required=True) optional.add_argument('--mask', dest='mask', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=(""Binary mask of voxels to include in TE "" ""Dependent ANAlysis. Must be in the same "" ""space as `data`. If an explicit mask is not "" ""provided, then Nilearn's compute_epi_mask "" ""function will be used to derive a mask "" ""from the first echo's data.""), default=None) optional.add_argument('--mix', dest='mixm', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=('File containing mixing matrix. If not ' 'provided, ME-PCA & ME-ICA is done.'), default=None) optional.add_argument('--ctab', dest='ctab', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=('File containing a component table from which ' 'to extract pre-computed classifications.'), default=None) optional.add_argument('--manacc', dest='manacc', help=('Comma separated list of manually ' 'accepted components'), default=None) optional.add_argument('--sourceTEs', dest='source_tes', type=str, help=('Source TEs for models. E.g., 0 for all, ' '-1 for opt. com., and 1,2 for just TEs 1 and ' '2. 
Default=-1.'), default=-1) optional.add_argument('--combmode', dest='combmode', action='store', choices=['t2s'], help=('Combination scheme for TEs: ' 't2s (Posse 1999, default)'), default='t2s') optional.add_argument('--verbose', dest='verbose', action='store_true', help='Generate intermediate and additional files.', default=False) optional.add_argument('--tedort', dest='tedort', action='store_true', help=('Orthogonalize rejected components w.r.t. ' 'accepted components prior to denoising.'), default=False) optional.add_argument('--gscontrol', dest='gscontrol', required=False, action='store', nargs='+', help=('Perform additional denoising to remove ' 'spatially diffuse noise. Default is None. ' 'This argument can be single value or a space ' 'delimited list'), choices=['t1c', 'gsr'], default=None) optional.add_argument('--tedpca', dest='tedpca', help='Method with which to select components in TEDPCA', choices=['mle', 'kundu', 'kundu-stabilize'], default='mle') optional.add_argument('--out-dir', dest='out_dir', type=str, help='Output directory.', default='.') optional.add_argument('--seed', dest='fixed_seed', type=int, help=('Value passed to repr(mdp.numx_rand.seed()) ' 'Set to an integer value for reproducible ICA results; ' 'otherwise, set to 42 for varying results across calls.'), default=42) optional.add_argument('--png', dest='png', action='store_true', help=('Creates a figures folder with static component ' 'maps, timecourse plots and other diagnostic ' 'images'), default=False) optional.add_argument('--png-cmap', dest='png_cmap', type=str, help=('Colormap for figures'), default='coolwarm') optional.add_argument('--maxit', dest='maxit', type=int, help=('Maximum number of iterations for ICA.'), default=500) optional.add_argument('--maxrestart', dest='maxrestart', type=int, help=('Maximum number of attempts for ICA. If ICA ' 'fails to converge, the fixed seed will be ' 'updated and ICA will be run again. If ' 'convergence is achieved before maxrestart ' 'attempts, ICA will finish early.'), default=10) optional.add_argument('--debug', dest='debug', help=argparse.SUPPRESS, action='store_true', default=False) optional.add_argument('--quiet', dest='quiet', help=argparse.SUPPRESS, action='store_true', default=False) parser._action_groups.append(optional) return parser ","def _get_parser(): """""" Parses command line inputs for tedana Returns ------- parser.parse_args() : argparse dict """""" parser = argparse.ArgumentParser() # Argument parser follow templtate provided by RalphyZ # https://stackoverflow.com/a/43456577 optional = parser._action_groups.pop() required = parser.add_argument_group('required arguments') required.add_argument('-d', dest='data', nargs='+', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=('Multi-echo dataset for analysis. May be a ' 'single file with spatially concatenated data ' 'or a set of echo-specific files, in the same ' 'order as the TEs are listed in the -e ' 'argument.'), required=True) required.add_argument('-e', dest='tes', nargs='+', metavar='TE', type=float, help='Echo times (in ms). E.g., 15.0 39.0 63.0', required=True) optional.add_argument('--mask', dest='mask', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=(""Binary mask of voxels to include in TE "" ""Dependent ANAlysis. Must be in the same "" ""space as `data`. 
If an explicit mask is not "" ""provided, then Nilearn's compute_epi_mask "" ""function will be used to derive a mask "" ""from the first echo's data.""), default=None) optional.add_argument('--mix', dest='mixm', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=('File containing mixing matrix. If not ' 'provided, ME-PCA & ME-ICA is done.'), default=None) optional.add_argument('--ctab', dest='ctab', metavar='FILE', type=lambda x: is_valid_file(parser, x), help=('File containing a component table from which ' 'to extract pre-computed classifications.'), default=None) optional.add_argument('--manacc', dest='manacc', help=('Comma separated list of manually ' 'accepted components'), default=None) optional.add_argument('--sourceTEs', dest='source_tes', type=str, help=('Source TEs for models. E.g., 0 for all, ' '-1 for opt. com., and 1,2 for just TEs 1 and ' '2. Default=-1.'), default=-1) optional.add_argument('--combmode', dest='combmode', action='store', choices=['t2s'], help=('Combination scheme for TEs: ' 't2s (Posse 1999, default)'), default='t2s') optional.add_argument('--verbose', dest='verbose', action='store_true', help='Generate intermediate and additional files.', default=False) optional.add_argument('--tedort', dest='tedort', action='store_true', help=('Orthogonalize rejected components w.r.t. ' 'accepted components prior to denoising.'), default=False) optional.add_argument('--gscontrol', dest='gscontrol', required=False, action='store', nargs='+', help=('Perform additional denoising to remove ' 'spatially diffuse noise. Default is None. ' 'This argument can be single value or a space ' 'delimited list'), choices=['t1c', 'gsr'], default=None) optional.add_argument('--tedpca', dest='tedpca', help='Method with which to select components in TEDPCA', choices=['mle', 'kundu', 'kundu-stabilize'], default='mle') optional.add_argument('--out-dir', dest='out_dir', type=str, help='Output directory.', default='.') optional.add_argument('--seed', dest='fixed_seed', type=int, help=('Value passed to repr(mdp.numx_rand.seed()) ' 'Set to an integer value for reproducible ICA results; ' 'otherwise, set to -1 for varying results across calls. ' 'Default value is 42.'), default=42) optional.add_argument('--png', dest='png', action='store_true', help=('Creates a figures folder with static component ' 'maps, timecourse plots and other diagnostic ' 'images'), default=False) optional.add_argument('--png-cmap', dest='png_cmap', type=str, help=('Colormap for figures'), default='coolwarm') optional.add_argument('--maxit', dest='maxit', type=int, help=('Maximum number of iterations for ICA.'), default=500) optional.add_argument('--maxrestart', dest='maxrestart', type=int, help=('Maximum number of attempts for ICA. If ICA ' 'fails to converge, the fixed seed will be ' 'updated and ICA will be run again. If ' 'convergence is achieved before maxrestart ' 'attempts, ICA will finish early.'), default=10) optional.add_argument('--debug', dest='debug', help=argparse.SUPPRESS, action='store_true', default=False) optional.add_argument('--quiet', dest='quiet', help=argparse.SUPPRESS, action='store_true', default=False) parser._action_groups.append(optional) return parser " 8985,"def rule_lazy(*loaders): """"""Decorate a callable as a rule with lazy loading. :param loaders: one or more functions to generate a list of **compiled** regexes to match URLs. 
:type loaders: :term:`function` Each ``loader`` function must accept a ``settings`` parameter and return a list (or tuple) of **compiled** regular expressions:: import re def loader(settings): return [re.compile(r'')] It will be called by Sopel when the bot parses the plugin to register rules to get its regexes. The ``settings`` argument will be the bot's :class:`sopel.config.Config` object. If any of the ``loader`` functions raises a :exc:`~sopel.plugins.exceptions.PluginError` exception, the rule will be ignored; it will not fail the plugin's loading. The decorated function will behave like any other :func:`callable`:: from sopel import plugin @plugin.rule_lazy(loader) def my_rule_handler(bot, trigger): bot.say('Rule triggered by: %s' % trigger.group(0)) .. versionadded:: 7.1 .. seealso:: When more than one loader is provided, they will be chained together with the :func:`sopel.tools.chain_loaders` function. """""" def decorator(function): function._sopel_callable = True if not hasattr(function, 'rule_lazy_loaders'): function.rule_lazy_loaders = [] function.rule_lazy_loaders.extend(loaders) return function return decorator ","def rule_lazy(*loaders): """"""Decorate a callable as a rule with lazy loading. :param loaders: one or more functions to generate a list of **compiled** regexes to match URLs :type loaders: :term:`function` Each ``loader`` function must accept a ``settings`` parameter and return a list (or tuple) of **compiled** regular expressions:: import re def loader(settings): return [re.compile(r'')] It will be called by Sopel when the bot parses the plugin to register rules to get its regexes. The ``settings`` argument will be the bot's :class:`sopel.config.Config` object. If any of the ``loader`` functions raises a :exc:`~sopel.plugins.exceptions.PluginError` exception, the rule will be ignored; it will not fail the plugin's loading. The decorated function will behave like any other :func:`callable`:: from sopel import plugin @plugin.rule_lazy(loader) def my_rule_handler(bot, trigger): bot.say('Rule triggered by: %s' % trigger.group(0)) .. versionadded:: 7.1 .. seealso:: When more than one loader is provided, they will be chained together with the :func:`sopel.tools.chain_loaders` function. """""" def decorator(function): function._sopel_callable = True if not hasattr(function, 'rule_lazy_loaders'): function.rule_lazy_loaders = [] function.rule_lazy_loaders.extend(loaders) return function return decorator " 45306,"def train( params: Dict, dtrain: DMatrix, *args, evals=(), num_actors: Optional[int] = None, evals_result: Optional[Dict] = None, **kwargs, ): """"""Run distributed training of XGBoost model. During work it evenly distributes `dtrain` between workers according to IP addresses partitions (in case of not evenly distribution of `dtrain` is possible by IPs, part of partitions will be re-distributed between nodes), runs xgb.train on each worker for subset of `dtrain` and reduce training results of each worker using Rabit Context. Parameters ---------- params : dict Booster params. dtrain : modin.experimental.DMatrix Data to be trained against. *args : iterable Other parameters for `xgboost.train`. evals: list of pairs (modin.experimental.DMatrix, string), optional. Default is empty List of validation sets for which metrics will evaluated during training. Validation metrics will help us track the performance of the model. num_actors : int, optional. Default is None Number of actors for training. If it's None, this value will be computed automatically. 
evals_result : dict, optional. Default is None Dict to store evaluation results in. **kwargs : dict Other parameters are the same as `xgboost.train`. Returns ------- modin.experimental.xgboost.Booster A trained booster. """""" LOGGER.info(""Training started"") if Engine.get() == ""Ray"": from .xgboost_ray import _train else: raise ValueError(""Current version supports only Ray engine."") assert isinstance( dtrain, DMatrix ), f""Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}."" result = _train(dtrain, num_actors, params, *args, evals=evals, **kwargs) if isinstance(evals_result, dict): evals_result.update(result[""history""]) LOGGER.info(""Training finished"") return Booster(model_file=result[""booster""]) ","def train( params: Dict, dtrain: DMatrix, *args, evals=(), num_actors: Optional[int] = None, evals_result: Optional[Dict] = None, **kwargs, ): """"""Run distributed training of XGBoost model. During work it evenly distributes `dtrain` between workers according to IP addresses partitions (in case of not evenly distribution of `dtrain` is possible by IPs, part of partitions will be re-distributed between nodes), runs xgb.train on each worker for subset of `dtrain` and reduce training results of each worker using Rabit Context. Parameters ---------- params : dict Booster params. dtrain : modin.experimental.xgboost.DMatrix Data to be trained against. *args : iterable Other parameters for `xgboost.train`. evals: list of pairs (modin.experimental.DMatrix, string), optional. Default is empty List of validation sets for which metrics will evaluated during training. Validation metrics will help us track the performance of the model. num_actors : int, optional. Default is None Number of actors for training. If it's None, this value will be computed automatically. evals_result : dict, optional. Default is None Dict to store evaluation results in. **kwargs : dict Other parameters are the same as `xgboost.train`. Returns ------- modin.experimental.xgboost.Booster A trained booster. """""" LOGGER.info(""Training started"") if Engine.get() == ""Ray"": from .xgboost_ray import _train else: raise ValueError(""Current version supports only Ray engine."") assert isinstance( dtrain, DMatrix ), f""Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}."" result = _train(dtrain, num_actors, params, *args, evals=evals, **kwargs) if isinstance(evals_result, dict): evals_result.update(result[""history""]) LOGGER.info(""Training finished"") return Booster(model_file=result[""booster""]) " 13409,"def test_01_verify_system_dataset_is_set_to_boot_pool(): results = GET(""/systemdataset/"") assert results.status_code == 200, results.text assert isinstance(results.json(), dict), results.text assert results.json()['pool'] == 'boot-pool', results.text assert results.json()['basename'] == 'boot-pool/.system', results.text ","def test_01_verify_sysds_is_set_to_boot_pool(): results = GET(""/systemdataset/"") assert results.status_code == 200, results.text assert isinstance(results.json(), dict), results.text assert results.json()['pool'] == 'boot-pool', results.text assert results.json()['basename'] == 'boot-pool/.system', results.text " 391,"def sample_smc( draws=2000, kernel=IMH, *, start=None, model=None, random_seed=None, chains=None, cores=None, compute_convergence_checks=True, return_inferencedata=True, idata_kwargs=None, progressbar=True, **kernel_kwargs, ): r"""""" Sequential Monte Carlo based sampling. 
Parameters ---------- draws : int, default 2000 The number of samples to draw from the posterior (i.e. last stage). And also the number of independent chains. Defaults to 2000. kernel : class, default `pymc.smc.smc.IMH` SMC_Kernel used. Defaults to :class:`pymc.smc.smc.IMH` (Independent Metropolis Hastings) start : dict, or array of dict, default None Starting point in parameter space. It should be a list of dict with length `chains`. When None (default) the starting point is sampled from the prior distribution. model : Model (optional if in ``with`` context). random_seed : int, array_like of int, RandomState or Generator, optional Random seed(s) used by the sampling steps. If a list, tuple or array of ints is passed, each entry will be used to seed each chain. A ValueError will be raised if the length does not match the number of chains. chains : int, default None The number of chains to sample. Running independent chains is important for some convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever is larger. cores : int, default None The number of chains to run in parallel. If ``None``, set to the number of CPUs in the system. compute_convergence_checks : bool, default True Whether to compute sampler statistics like ``R hat`` and ``effective_n``. Defaults to ``True``. return_inferencedata : bool, default True Whether to return the trace as an InferenceData (True) object or a MultiTrace (False). Defaults to ``True``. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data`. progressbar : bool, optional, default True Whether or not to display a progress bar in the command line. **kernel_kwargs : dict, optional Keyword arguments passed to the SMC_kernel. The default IMH kernel takes the following keywords: threshold : float, default 0.5 Determines the change of beta from stage to stage, i.e. indirectly the number of stages, the higher the value of `threshold` the higher the number of stages. Defaults to 0.5. It should be between 0 and 1. correlation_threshold : float, default 0.01 The lower the value the higher the number of MCMC steps computed automatically. Defaults to 0.01. It should be between 0 and 1. Keyword arguments for other kernels should be checked in the respective docstrings. Notes ----- SMC works by moving through successive stages. At each stage the inverse temperature :math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0 we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution. So in more general terms, we are always computing samples from a tempered posterior that we can write as: .. math:: p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta) A summary of the algorithm is: 1. Initialize :math:`\beta` at zero and stage at zero. 2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the tempered posterior is the prior). 3. Increase :math:`\beta` in order to make the effective sample size equal some predefined value (we use :math:`Nt`, where :math:`t` is 0.5 by default). 4. Compute a set of N importance weights W. The weights are computed as the ratio of the likelihoods of a sample at stage i+1 and stage i. 5. Obtain :math:`S_{w}` by re-sampling according to W. 6. Use W to compute the mean and covariance for the proposal distribution, a MvNormal. 7. Run N independent MCMC chains, starting each one from a different sample in :math:`S_{w}`. 
For the IMH kernel, the mean of the proposal distribution is the mean of the previous posterior stage and not the current point in parameter space. 8. The N chains are run until the autocorrelation with the samples from the previous stage stops decreasing given a certain threshold. 9. Repeat from step 3 until :math:`\beta \ge 1`. 10. The final result is a collection of N samples from the posterior. References ---------- .. [Minson2013] Minson, S. E., Simons, M., and Beck, J. L. (2013). ""Bayesian inversion for finite fault earthquake source models I- Theory and algorithm."" Geophysical Journal International, 2013, 194(3), pp.1701-1726. `link `__ .. [Ching2007] Ching, J., and Chen, Y. (2007). ""Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class Selection, and Model Averaging."" J. Eng. Mech., 2007, 133(7), pp. 816-832. doi:10.1061/(ASCE)0733-9399(2007)133:7(816). `link `__ """""" if isinstance(kernel, str) and kernel.lower() in (""abc"", ""metropolis""): warnings.warn( f'The kernel string argument ""{kernel}"" in sample_smc has been deprecated. ' f""It is no longer needed to distinguish between `abc` and `metropolis`"", FutureWarning, stacklevel=2, ) kernel = IMH if kernel_kwargs.pop(""save_sim_data"", None) is not None: warnings.warn( ""save_sim_data has been deprecated. Use pm.sample_posterior_predictive "" ""to obtain the same type of samples."", FutureWarning, stacklevel=2, ) if kernel_kwargs.pop(""save_log_pseudolikelihood"", None) is not None: warnings.warn( ""save_log_pseudolikelihood has been deprecated. This information is "" ""now saved as log_likelihood in models with Simulator distributions."", FutureWarning, stacklevel=2, ) parallel = kernel_kwargs.pop(""parallel"", None) if parallel is not None: warnings.warn( ""The argument parallel is deprecated, use the argument cores instead."", FutureWarning, stacklevel=2, ) if parallel is False: cores = 1 if cores is None: cores = _cpu_count() if chains is None: chains = max(2, cores) else: cores = min(chains, cores) if random_seed == -1: raise FutureWarning( f""random_seed should be a non-negative integer or None, got: {random_seed}"" ""This will raise a ValueError in the Future"" ) random_seed = None if isinstance(random_seed, int) or random_seed is None: rng = np.random.default_rng(seed=random_seed) random_seed = list(rng.integers(2**30, size=chains)) elif isinstance(random_seed, Iterable): if len(random_seed) != chains: raise ValueError(f""Length of seeds ({len(seeds)}) must match number of chains {chains}"") else: raise TypeError(""Invalid value for `random_seed`. 
Must be tuple, list, int or None"") model = modelcontext(model) _log = logging.getLogger(""pymc"") _log.info(""Initializing SMC sampler..."") _log.info( f""Sampling {chains} chain{'s' if chains > 1 else ''} "" f""in {cores} job{'s' if cores > 1 else ''}"" ) params = ( draws, kernel, start, model, ) t1 = time.time() if cores > 1: results = run_chains_parallel( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores ) else: results = run_chains_sequential( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs ) ( traces, sample_stats, sample_settings, ) = zip(*results) trace = MultiTrace(traces) _t_sampling = time.time() - t1 sample_stats, idata = _save_sample_stats( sample_settings, sample_stats, chains, trace, return_inferencedata, _t_sampling, idata_kwargs, model, ) if compute_convergence_checks: _compute_convergence_checks(idata, draws, model, trace) return idata if return_inferencedata else trace ","def sample_smc( draws=2000, kernel=IMH, *, start=None, model=None, random_seed=None, chains=None, cores=None, compute_convergence_checks=True, return_inferencedata=True, idata_kwargs=None, progressbar=True, **kernel_kwargs, ): r"""""" Sequential Monte Carlo based sampling. Parameters ---------- draws : int, default 2000 The number of samples to draw from the posterior (i.e. last stage). And also the number of independent chains. Defaults to 2000. kernel : class, default `pymc.smc.smc.IMH` SMC_Kernel used. Defaults to :class:`pymc.smc.smc.IMH` (Independent Metropolis Hastings) start : dict or array of dict, optional Starting point in parameter space. It should be a list of dict with length `chains`. When None (default) the starting point is sampled from the prior distribution. model : Model (optional if in ``with`` context). random_seed : int, array_like of int, RandomState or Generator, optional Random seed(s) used by the sampling steps. If a list, tuple or array of ints is passed, each entry will be used to seed each chain. A ValueError will be raised if the length does not match the number of chains. chains : int, default None The number of chains to sample. Running independent chains is important for some convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever is larger. cores : int, default None The number of chains to run in parallel. If ``None``, set to the number of CPUs in the system. compute_convergence_checks : bool, default True Whether to compute sampler statistics like ``R hat`` and ``effective_n``. Defaults to ``True``. return_inferencedata : bool, default True Whether to return the trace as an InferenceData (True) object or a MultiTrace (False). Defaults to ``True``. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data`. progressbar : bool, optional, default True Whether or not to display a progress bar in the command line. **kernel_kwargs : dict, optional Keyword arguments passed to the SMC_kernel. The default IMH kernel takes the following keywords: threshold : float, default 0.5 Determines the change of beta from stage to stage, i.e. indirectly the number of stages, the higher the value of `threshold` the higher the number of stages. Defaults to 0.5. It should be between 0 and 1. correlation_threshold : float, default 0.01 The lower the value the higher the number of MCMC steps computed automatically. Defaults to 0.01. It should be between 0 and 1. Keyword arguments for other kernels should be checked in the respective docstrings. 
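    A minimal usage sketch; the toy model, its variable names, and the
    ``draws``/``chains`` values below are illustrative assumptions rather than
    anything taken from the surrounding code::

        import pymc as pm

        with pm.Model():
            theta = pm.Normal('theta', 0, 1)
            pm.Normal('y', mu=theta, sigma=1, observed=[0.1, -0.3, 0.2])
            idata = pm.sample_smc(draws=1000, chains=2)

    Because ``return_inferencedata`` defaults to ``True``, ``idata`` above is an
    ``InferenceData`` object.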
Notes ----- SMC works by moving through successive stages. At each stage the inverse temperature :math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0 we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution. So in more general terms, we are always computing samples from a tempered posterior that we can write as: .. math:: p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta) A summary of the algorithm is: 1. Initialize :math:`\beta` at zero and stage at zero. 2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the tempered posterior is the prior). 3. Increase :math:`\beta` in order to make the effective sample size equal some predefined value (we use :math:`Nt`, where :math:`t` is 0.5 by default). 4. Compute a set of N importance weights W. The weights are computed as the ratio of the likelihoods of a sample at stage i+1 and stage i. 5. Obtain :math:`S_{w}` by re-sampling according to W. 6. Use W to compute the mean and covariance for the proposal distribution, a MvNormal. 7. Run N independent MCMC chains, starting each one from a different sample in :math:`S_{w}`. For the IMH kernel, the mean of the proposal distribution is the mean of the previous posterior stage and not the current point in parameter space. 8. The N chains are run until the autocorrelation with the samples from the previous stage stops decreasing given a certain threshold. 9. Repeat from step 3 until :math:`\beta \ge 1`. 10. The final result is a collection of N samples from the posterior. References ---------- .. [Minson2013] Minson, S. E., Simons, M., and Beck, J. L. (2013). ""Bayesian inversion for finite fault earthquake source models I- Theory and algorithm."" Geophysical Journal International, 2013, 194(3), pp.1701-1726. `link `__ .. [Ching2007] Ching, J., and Chen, Y. (2007). ""Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class Selection, and Model Averaging."" J. Eng. Mech., 2007, 133(7), pp. 816-832. doi:10.1061/(ASCE)0733-9399(2007)133:7(816). `link `__ """""" if isinstance(kernel, str) and kernel.lower() in (""abc"", ""metropolis""): warnings.warn( f'The kernel string argument ""{kernel}"" in sample_smc has been deprecated. ' f""It is no longer needed to distinguish between `abc` and `metropolis`"", FutureWarning, stacklevel=2, ) kernel = IMH if kernel_kwargs.pop(""save_sim_data"", None) is not None: warnings.warn( ""save_sim_data has been deprecated. Use pm.sample_posterior_predictive "" ""to obtain the same type of samples."", FutureWarning, stacklevel=2, ) if kernel_kwargs.pop(""save_log_pseudolikelihood"", None) is not None: warnings.warn( ""save_log_pseudolikelihood has been deprecated. 
This information is "" ""now saved as log_likelihood in models with Simulator distributions."", FutureWarning, stacklevel=2, ) parallel = kernel_kwargs.pop(""parallel"", None) if parallel is not None: warnings.warn( ""The argument parallel is deprecated, use the argument cores instead."", FutureWarning, stacklevel=2, ) if parallel is False: cores = 1 if cores is None: cores = _cpu_count() if chains is None: chains = max(2, cores) else: cores = min(chains, cores) if random_seed == -1: raise FutureWarning( f""random_seed should be a non-negative integer or None, got: {random_seed}"" ""This will raise a ValueError in the Future"" ) random_seed = None if isinstance(random_seed, int) or random_seed is None: rng = np.random.default_rng(seed=random_seed) random_seed = list(rng.integers(2**30, size=chains)) elif isinstance(random_seed, Iterable): if len(random_seed) != chains: raise ValueError(f""Length of seeds ({len(seeds)}) must match number of chains {chains}"") else: raise TypeError(""Invalid value for `random_seed`. Must be tuple, list, int or None"") model = modelcontext(model) _log = logging.getLogger(""pymc"") _log.info(""Initializing SMC sampler..."") _log.info( f""Sampling {chains} chain{'s' if chains > 1 else ''} "" f""in {cores} job{'s' if cores > 1 else ''}"" ) params = ( draws, kernel, start, model, ) t1 = time.time() if cores > 1: results = run_chains_parallel( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores ) else: results = run_chains_sequential( chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs ) ( traces, sample_stats, sample_settings, ) = zip(*results) trace = MultiTrace(traces) _t_sampling = time.time() - t1 sample_stats, idata = _save_sample_stats( sample_settings, sample_stats, chains, trace, return_inferencedata, _t_sampling, idata_kwargs, model, ) if compute_convergence_checks: _compute_convergence_checks(idata, draws, model, trace) return idata if return_inferencedata else trace " 41946,"def objective(trial) -> float: model = LightningNet(trial) datamodule = MNISTDataModule(data_dir=DIR, batch_size=BATCHSIZE) trainer = pl.Trainer( logger=False, limit_val_batches=PERCENT_VALID_EXAMPLES, checkpoint_callback=False, max_epochs=EPOCHS, gpus=-1 if torch.cuda.is_available() else None, callbacks=[PyTorchLightningPruningCallback(trial, monitor=""val_acc"")], ) trainer.fit(model, datamodule=datamodule) return trainer.callback_metrics[""val_acc""].item() ","def objective(trial) -> float: model = LightningNet(trial) datamodule = MNISTDataModule(data_dir=DIR, batch_size=BATCHSIZE) trainer = pl.Trainer( logger=False, limit_val_batches=PERCENT_VALID_EXAMPLES, checkpoint_callback=False, max_epochs=EPOCHS, gpus=1 if torch.cuda.is_available() else None, callbacks=[PyTorchLightningPruningCallback(trial, monitor=""val_acc"")], ) trainer.fit(model, datamodule=datamodule) return trainer.callback_metrics[""val_acc""].item() " 23099,"def test_to_delayed_optimize_graph(tmpdir): b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=1) b2 = b.map(inc).map(inc).map(inc) [d] = b2.to_delayed() text = str(dict(d.dask)) assert text.count(""reify"") == 1 [d2] = b2.to_delayed(optimize_graph=False) assert dict(d2.dask) == dict(b2.dask) assert d.compute() == d2.compute() x = b2.sum() d = x.to_delayed() text = str(dict(d.dask)) assert text.count(""reify"") == 0 d2 = x.to_delayed(optimize_graph=False) assert dict(d2.dask) == dict(x.dask) assert d.compute() == d2.compute() [d] = b2.to_textfiles(tmpdir, compute=False) text = str(dict(d.dask)) assert 
text.count(""reify"") <= 0 ","def test_to_delayed_optimize_graph(tmpdir): b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=1) b2 = b.map(inc).map(inc).map(inc) [d] = b2.to_delayed() text = str(dict(d.dask)) assert text.count(""reify"") == 1 [d2] = b2.to_delayed(optimize_graph=False) assert dict(d2.dask) == dict(b2.dask) assert d.compute() == d2.compute() x = b2.sum() d = x.to_delayed() text = str(dict(d.dask)) assert text.count(""reify"") == 0 d2 = x.to_delayed(optimize_graph=False) assert dict(d2.dask) == dict(x.dask) assert d.compute() == d2.compute() [d] = b2.to_textfiles(str(tmpdir), compute=False) text = str(dict(d.dask)) assert text.count(""reify"") <= 0 " 17890,"def verify_acl(message, acl): try: if settings.DISABLE_ACL: return True allowed = is_acl_allowed(message.sender.handle, message.sender.id, acl) if allowed: return True if hasattr(message, ""data"") and hasattr(message.data, ""backend_supports_acl""): if not message.data.backend_supports_acl: logging.warning( ""%s was just allowed to perform actions in %s because the backend does not support ACL. This can be a security risk."" % ( message.sender.handle, acl, ) + ""To fix this, set ACL groups in your config.py, or set DISABLE_ACL = True"" ) return True except: pass return False ","def verify_acl(message, acl): try: if settings.DISABLE_ACL: return True allowed = is_acl_allowed(message.sender.id, acl) if allowed: return True if hasattr(message, ""data"") and hasattr(message.data, ""backend_supports_acl""): if not message.data.backend_supports_acl: logging.warning( ""%s was just allowed to perform actions in %s because the backend does not support ACL. This can be a security risk."" % ( message.sender.handle, acl, ) + ""To fix this, set ACL groups in your config.py, or set DISABLE_ACL = True"" ) return True except: pass return False " 32353,"def malware_query_filter(needs_attention: str, malware_type: str, malware_status: str, time_stamp: str, limit_range: int) -> dict: query = [] if bool(needs_attention) is True: query.append({""fieldName"": ""needsAttention"", ""operator"": ""Is"", ""values"": [bool(needs_attention)]}) if bool(malware_type) is True: types = malware_type.split("","") query.append({""fieldName"": ""type"", ""operator"": ""Equals"", ""values"": types}) if bool(malware_status) is True: is_status = malware_status.split("","") query.append({""fieldName"": ""status"", ""operator"": ""Equals"", ""values"": is_status}) if bool(time_stamp) is True: query.append({""fieldName"": ""timestamp"", ""operator"": ""GreaterThan"", ""values"": [int(time_stamp)]}) response = malware_query(query, limit_range) return response ","def malware_query_filter(needs_attention: str, malware_type: str, malware_status: str, time_stamp: str, limit_range: int) -> dict: query = [] if bool(needs_attention) is True: query.append({""fieldName"": ""needsAttention"", ""operator"": ""Is"", ""values"": [bool(needs_attention)]}) if bool(malware_type) is True: types = malware_type.split("","") query.append({""fieldName"": ""type"", ""operator"": ""Equals"", ""values"": types}) if bool(malware_status) is True: is_status = malware_status.split("","") query.append({""fieldName"": ""status"", ""operator"": ""Equals"", ""values"": is_status}) if time_stamp: query.append({""fieldName"": ""timestamp"", ""operator"": ""GreaterThan"", ""values"": [int(time_stamp)]}) response = malware_query(query, limit_range) return response " 32581,"def login(): url = '{}/auth'.format(BASE_PATH) # this call will raise an exception on wrong credential http_request('GET', url) 
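    # (Nothing is returned on success: per the comment above, http_request is expected to
    # raise on bad credentials, so simply reaching the end of this call means the login
    # check passed.)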
","def login(): url = f'{BASE_PATH}/auth' # this call will raise an exception on wrong credential http_request('GET', url) " 52292,"def display_open(file, message=""\nDone! To view results""): """"""Print the syntax to open a file based on the platform."""""" cmd_open = None if sys.platform.startswith('linux'): # If user runs SCT within the official Docker distribution, or in WSL, then the command xdg-open will not be # working, therefore we prefer to instruct the user to manually open the file. # Source for WSL environment variables: https://stackoverflow.com/a/61036356 if ""DOCKER"" not in os.environ and ""IS_WSL"" not in os.environ and ""WSL_DISTRO_NAME"" not in os.environ: cmd_open = 'xdg-open' elif sys.platform.startswith('darwin'): cmd_open = 'open' elif sys.platform.startswith('win32'): cmd_open = 'start' if cmd_open: printv(f'{message}, type:') printv(f""{cmd_open} {file}\n"", verbose=1, type='info') else: printv(f'{message}, open the following file:') printv(f""{file}\n"", verbose=1, type='info') ","def display_open(file, message=""\nDone! To view results""): """"""Print the syntax to open a file based on the platform."""""" cmd_open = None if sys.platform.startswith('linux'): # If user runs SCT within the official Docker distribution, or in WSL, then the command xdg-open will not be # working, therefore we prefer to instruct the user to manually open the file. # Source for WSL environment variables: https://stackoverflow.com/a/61036356 if ""DOCKER"" not in os.environ and ""IS_WSL"" not in os.environ and ""WSL_DISTRO_NAME"" not in os.environ: cmd_open = 'xdg-open' elif sys.platform.startswith('darwin'): cmd_open = 'open' elif sys.platform.startswith('win32'): cmd_open = 'start' if cmd_open: printv(f'{message}, type:') printv(f""{cmd_open} {file}\n"", type='info') else: printv(f'{message}, open the following file:') printv(f""{file}\n"", verbose=1, type='info') " 31880,"def test_file_command(requests_mock): """""" Given: - Request file reputation given hash array When: - Running a file reputation command Then: - Return file reputation for each file """""" mock_response = util_load_json('test_data/scan_file_results.json') requests_mock.post(BASE_URL, json=mock_response) client = create_client() args = {""file"": [""094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d""]} response = MalwareBazaar.file_command(client, args) assert response[0].outputs == mock_response.get('data')[0] assert response[0].outputs_prefix == 'MalwareBazaar.File' assert response[0].outputs_key_field == 'md5_hash' assert response[0].relationships is not None ","def test_file_command(requests_mock): """""" Given: - Request file reputation given hash array When: - Running a file reputation command Then: - Make sure a file reputation for each file is returned. """""" mock_response = util_load_json('test_data/scan_file_results.json') requests_mock.post(BASE_URL, json=mock_response) client = create_client() args = {""file"": [""094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d""]} response = MalwareBazaar.file_command(client, args) assert response[0].outputs == mock_response.get('data')[0] assert response[0].outputs_prefix == 'MalwareBazaar.File' assert response[0].outputs_key_field == 'md5_hash' assert response[0].relationships is not None " 47115,"def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {} if args.train_file is not None: data_files[""train""] = args.train_file if args.validation_file is not None: data_files[""validation""] = args.validation_file extension = args.train_file.split(""."")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = XLNetConfig.from_pretrained(args.model_name_or_path) tokenizer = XLNetTokenizerFast.from_pretrained(args.model_name_or_path) model = XLNetForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool("".ckpt"" in args.model_name_or_path), config=config ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if args.do_train: column_names = raw_datasets[""train""].column_names elif args.do_eval: column_names = raw_datasets[""validation""].column_names else: column_names = raw_datasets[""test""].column_names question_column_name = ""question"" if ""question"" in column_names else column_names[0] context_column_name = ""context"" if ""context"" in column_names else column_names[1] answer_column_name = ""answers"" if ""answers"" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). pad_on_right = tokenizer.padding_side == ""right"" if args.max_seq_length > tokenizer.model_max_length: logger.warn( f""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"" f""model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}."" ) max_seq_length = min(args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop(""offset_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # Let's label those examples! tokenized_examples[""start_positions""] = [] tokenized_examples[""end_positions""] = [] tokenized_examples[""is_impossible""] = [] tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples[""input_ids""][i] cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others get 1.0. # The cls token gets 1.0 too (for predictions of empty answers). tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers[""answer_start""]) == 0: tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Start/end character index of the answer in the text. start_char = answers[""answer_start""][0] end_char = start_char + len(answers[""text""][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != context_idx: token_start_index += 1 # End token index of the current span in the text. 
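            # (This mirrors the forward scan for token_start_index above: starting from the
            # last token, we step backwards until sequence_ids marks a context token, so any
            # trailing special tokens, and padding when it sits on the right, are skipped.)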
token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != context_idx: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples[""start_positions""].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples[""end_positions""].append(token_end_index + 1) tokenized_examples[""is_impossible""].append(0.0) return tokenized_examples if args.do_train: if ""train"" not in raw_datasets: raise ValueError(""--do_train requires a train dataset"") train_dataset = raw_datasets[""train""] if args.max_train_samples is not None: # We will select sample from whole data if agument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples train_dataset = train_dataset.select(range(args.max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples[""example_id""] = [] # We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label. tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, input_ids in enumerate(tokenized_examples[""input_ids""]): # Find the CLS token in the input ids. 
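            # (XLNet places the CLS token at the end of the sequence rather than at
            # position 0, which is why it is looked up with .index() instead of being
            # assumed to be the first token.)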
cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others 1.0. tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples[""example_id""].append(examples[""id""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples[""offset_mapping""][i] = [ (o if sequence_ids[k] == context_idx else None) for k, o in enumerate(tokenized_examples[""offset_mapping""][i]) ] return tokenized_examples if args.do_eval: if ""validation"" not in raw_datasets: raise ValueError(""--do_eval requires a validation dataset"") eval_examples = raw_datasets[""validation""] if args.max_val_samples is not None: # We will select sample from whole data eval_examples = eval_examples.select(range(args.max_val_samples)) # Validation Feature Creation eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_val_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again eval_dataset = eval_dataset.select(range(args.max_val_samples)) if args.do_predict: if ""test"" not in raw_datasets: raise ValueError(""--do_predict requires a test dataset"") test_examples = raw_datasets[""test""] if args.max_test_samples is not None: # We will select sample from whole data test_examples = test_examples.select(range(args.max_test_samples)) # Test Feature Creation test_dataset = test_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_test_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again test_dataset = test_dataset.select(range(args.max_test_samples)) # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f""Sample {index} of the training set: {train_dataset[index]}."") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
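        # (Illustrative example: with pad_to_multiple_of=8 a batch whose longest sample is
        # 61 tokens long gets padded up to 64, whereas without it the batch would be padded
        # to exactly 61.)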
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) if args.do_eval: eval_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) eval_dataloader = DataLoader( eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) if args.do_predict: test_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) test_dataloader = DataLoader( test_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) # Post-processing: def post_processing_function(examples, features, predictions, stage=""eval""): # Post-processing: we match the start logits and end logits to answers in the original context. predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search( examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, start_n_top=model.config.start_n_top, end_n_top=model.config.end_n_top, output_dir=args.output_dir, prefix=stage, ) # Format the result to the format the metric expects. if args.version_2_with_negative: formatted_predictions = [ {""id"": k, ""prediction_text"": v, ""no_answer_probability"": scores_diff_json[k]} for k, v in predictions.items() ] else: formatted_predictions = [{""id"": k, ""prediction_text"": v} for k, v in predictions.items()] references = [{""id"": ex[""id""], ""answers"": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = load_metric(""squad_v2"" if args.version_2_with_negative else ""squad"") def create_and_fill_np_array(start_or_end_logits, dataset, max_len): """""" Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor Args: start_or_end_logits(:obj:`tensor`): This is the output predictions of the model. We can only enter either start or end logits. eval_dataset: Evaluation dataset max_len(:obj:`int`): The maximum length of the output tensor. ( See the model.eval() part for more details ) """""" step = 0 # create a numpy array and fill it with -100. logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32) # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather for i, output_logit in enumerate(start_or_end_logits): # populate columns # We have to fill it such that we have to take the whole tensor and replace it on the newly created array # And after every iteration we have to change the step batch_size = output_logit.shape[0] cols = output_logit.shape[1] logits_concat[step : step + batch_size, :cols] = output_logit step += batch_size return logits_concat # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = [""bias"", ""LayerNorm.weight""] optimizer_grouped_parameters = [ { ""params"": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], ""weight_decay"": args.weight_decay, }, { ""params"": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], ""weight_decay"": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. 
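    # accelerator.prepare moves the model to the right device and wraps the optimizer and
    # dataloaders for whatever setup Accelerate was launched with (single GPU, multi-GPU,
    # TPU), so no explicit .to(device) calls are needed in the training loop below.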
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break if args.do_eval: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy()) all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy()) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy()) all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy()) all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the 
tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, eval_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, eval_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, eval_dataset, max_len) all_cls_logits = np.concatenate(all_cls_logits, axis=0) # delete the list of numpy arrays del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index eval_dataset.set_format(type=None, columns=list(eval_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy) eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f""Evaluation metrics: {eval_metric}"") if args.do_predict: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(test_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy()) all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy()) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy()) all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy()) all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, test_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, test_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, test_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, test_dataset, max_len) all_cls_logits = np.concatenate(all_cls_logits, axis=0) # delete the list of numpy arrays del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index test_dataset.set_format(type=None, columns=list(test_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) prediction = post_processing_function(test_examples, test_dataset, outputs_numpy) test_metric = metric.compute(predictions=prediction.predictions, 
references=prediction.label_ids) logger.info(f""Test metrics: {test_metric}"") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) ","def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {} if args.train_file is not None: data_files[""train""] = args.train_file if args.validation_file is not None: data_files[""validation""] = args.validation_file extension = args.train_file.split(""."")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = XLNetConfig.from_pretrained(args.model_name_or_path) tokenizer = XLNetTokenizerFast.from_pretrained(args.model_name_or_path) model = XLNetForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool("".ckpt"" in args.model_name_or_path), config=config ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. column_names = raw_datasets[""train""].column_names question_column_name = ""question"" if ""question"" in column_names else column_names[0] context_column_name = ""context"" if ""context"" in column_names else column_names[1] answer_column_name = ""answers"" if ""answers"" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
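    # (XLNetTokenizerFast pads on the left by default, so pad_on_right is normally False
    # here and the pair is assembled as (context|question); the check keeps this code
    # tokenizer-agnostic.)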
pad_on_right = tokenizer.padding_side == ""right"" if args.max_seq_length > tokenizer.model_max_length: logger.warn( f""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"" f""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."" ) max_seq_length = min(args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop(""offset_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # Let's label those examples! tokenized_examples[""start_positions""] = [] tokenized_examples[""end_positions""] = [] tokenized_examples[""is_impossible""] = [] tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples[""input_ids""][i] cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others get 1.0. # The cls token gets 1.0 too (for predictions of empty answers). tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers[""answer_start""]) == 0: tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Start/end character index of the answer in the text. start_char = answers[""answer_start""][0] end_char = start_char + len(answers[""text""][0]) # Start token index of the current span in the text. 
token_start_index = 0 while sequence_ids[token_start_index] != context_idx: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != context_idx: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples[""start_positions""].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples[""end_positions""].append(token_end_index + 1) tokenized_examples[""is_impossible""].append(0.0) return tokenized_examples if args.do_train: if ""train"" not in raw_datasets: raise ValueError(""--do_train requires a train dataset"") train_dataset = raw_datasets[""train""] if args.max_train_samples is not None: # We will select sample from whole data if agument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples train_dataset = train_dataset.select(range(args.max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples[""example_id""] = [] # We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label. 
tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, input_ids in enumerate(tokenized_examples[""input_ids""]): # Find the CLS token in the input ids. cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others 1.0. tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples[""example_id""].append(examples[""id""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples[""offset_mapping""][i] = [ (o if sequence_ids[k] == context_idx else None) for k, o in enumerate(tokenized_examples[""offset_mapping""][i]) ] return tokenized_examples if args.do_eval: if ""validation"" not in raw_datasets: raise ValueError(""--do_eval requires a validation dataset"") eval_examples = raw_datasets[""validation""] if args.max_val_samples is not None: # We will select sample from whole data eval_examples = eval_examples.select(range(args.max_val_samples)) # Validation Feature Creation eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_val_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again eval_dataset = eval_dataset.select(range(args.max_val_samples)) if args.do_predict: if ""test"" not in raw_datasets: raise ValueError(""--do_predict requires a test dataset"") test_examples = raw_datasets[""test""] if args.max_test_samples is not None: # We will select sample from whole data test_examples = test_examples.select(range(args.max_test_samples)) # Test Feature Creation test_dataset = test_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_test_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again test_dataset = test_dataset.select(range(args.max_test_samples)) # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f""Sample {index} of the training set: {train_dataset[index]}."") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) if args.do_eval: eval_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) eval_dataloader = DataLoader( eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) if args.do_predict: test_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) test_dataloader = DataLoader( test_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) # Post-processing: def post_processing_function(examples, features, predictions, stage=""eval""): # Post-processing: we match the start logits and end logits to answers in the original context. predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search( examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, start_n_top=model.config.start_n_top, end_n_top=model.config.end_n_top, output_dir=args.output_dir, prefix=stage, ) # Format the result to the format the metric expects. if args.version_2_with_negative: formatted_predictions = [ {""id"": k, ""prediction_text"": v, ""no_answer_probability"": scores_diff_json[k]} for k, v in predictions.items() ] else: formatted_predictions = [{""id"": k, ""prediction_text"": v} for k, v in predictions.items()] references = [{""id"": ex[""id""], ""answers"": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = load_metric(""squad_v2"" if args.version_2_with_negative else ""squad"") def create_and_fill_np_array(start_or_end_logits, dataset, max_len): """""" Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor Args: start_or_end_logits(:obj:`tensor`): This is the output predictions of the model. We can only enter either start or end logits. eval_dataset: Evaluation dataset max_len(:obj:`int`): The maximum length of the output tensor. ( See the model.eval() part for more details ) """""" step = 0 # create a numpy array and fill it with -100. logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32) # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather for i, output_logit in enumerate(start_or_end_logits): # populate columns # We have to fill it such that we have to take the whole tensor and replace it on the newly created array # And after every iteration we have to change the step batch_size = output_logit.shape[0] cols = output_logit.shape[1] logits_concat[step : step + batch_size, :cols] = output_logit step += batch_size return logits_concat # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = [""bias"", ""LayerNorm.weight""] optimizer_grouped_parameters = [ { ""params"": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], ""weight_decay"": args.weight_decay, }, { ""params"": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], ""weight_decay"": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. 
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break if args.do_eval: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy()) all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy()) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy()) all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy()) all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the 
tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, eval_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, eval_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, eval_dataset, max_len) all_cls_logits = np.concatenate(all_cls_logits, axis=0) # delete the list of numpy arrays del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index eval_dataset.set_format(type=None, columns=list(eval_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy) eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f""Evaluation metrics: {eval_metric}"") if args.do_predict: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(test_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy()) all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy()) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy()) all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy()) all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, test_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, test_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, test_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, test_dataset, max_len) all_cls_logits = np.concatenate(all_cls_logits, axis=0) # delete the list of numpy arrays del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index test_dataset.set_format(type=None, columns=list(test_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) prediction = post_processing_function(test_examples, test_dataset, outputs_numpy) test_metric = metric.compute(predictions=prediction.predictions, 
references=prediction.label_ids) logger.info(f""Test metrics: {test_metric}"") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) " 17944,"def average_rate( target: ndarray, varying: ArrayLike[float], trim: Optional[ArrayLike[float]] = None, ) -> ndarray: """"""Computes the average rate of a target net income. Given a `target` net income, and according to the `varying` gross income. Optionally, a `trim` can be applied consisting on the lower and upper bounds of the average rate to be computed. Args: target (ndarray): The targeted net income. varying (:obj:`ArrayLike[float]`): The varying gross income. trim (:obj:`ArrayLike[float]`, optional): The lower and upper bounds of the average rate. Defaults to None. Returns: ndarray: The average rate for each target. When `trim` is provided, values that are out of the provided bounds are replaced by :obj:`numpy.nan`. Examples: >>> target = numpy.array([1, 2, 3]) >>> varying = 2 >>> trim = [-1, .25] >>> average_rate(target, varying, trim) array([ nan, 0. , -0.5]) """""" average_rate = 1 - target / varying if trim is not None: average_rate = numpy.where(average_rate <= max(trim), average_rate, numpy.nan) average_rate = numpy.where(average_rate >= min(trim), average_rate, numpy.nan) return average_rate ","def average_rate( target: ndarray, varying: ArrayLike[float], trim: Optional[ArrayLike[float]] = None, ) -> ndarray: """"""Computes the average rate of a target net income. Given a `target` net income, and according to the `varying` gross income. Optionally, a `trim` can be applied consisting of the lower and the upper bounds of the average rate to be computed. Args: target (ndarray): The targeted net income. varying (:obj:`ArrayLike[float]`): The varying gross income. trim (:obj:`ArrayLike[float]`, optional): The lower and upper bounds of the average rate. Defaults to None. Returns: ndarray: The average rate for each target. When `trim` is provided, values that are out of the provided bounds are replaced by :obj:`numpy.nan`. Examples: >>> target = numpy.array([1, 2, 3]) >>> varying = 2 >>> trim = [-1, .25] >>> average_rate(target, varying, trim) array([ nan, 0. 
, -0.5]) """""" average_rate = 1 - target / varying if trim is not None: average_rate = numpy.where(average_rate <= max(trim), average_rate, numpy.nan) average_rate = numpy.where(average_rate >= min(trim), average_rate, numpy.nan) return average_rate " 56837,"def get_scheduled_report_ids(period, start_datetime=None, end_datetime=None): end_datetime = end_datetime or datetime.utcnow() assert period in ('hourly', 'daily', 'weekly', 'monthly'), period if not start_datetime: start_datetime = _get_default_start_datetime(end_datetime) for target_point_in_time in _iter_15_minute_marks_in_range(start_datetime, end_datetime): if period == 'hourly' and target_point_in_time.minute != 0: # Don't care if not on hour continue keys = _make_all_notification_view_keys(period, target_point_in_time) for key in keys: for result in ReportNotification.view( ""reportconfig/all_notifications"", reduce=False, include_docs=False, **key ).all(): if period == 'hourly': hour = result['value'].get('hour') stop_hour = result['value'].get('stop_hour') # For backwards compatibility and a general safety measure if type(hour) != int or type(stop_hour) != int: yield result['id'] elif hour <= target_point_in_time.hour <= stop_hour: yield result['id'] else: yield result['id'] ","def get_scheduled_report_ids(period, start_datetime=None, end_datetime=None): end_datetime = end_datetime or datetime.utcnow() assert period in ('hourly', 'daily', 'weekly', 'monthly'), period if not start_datetime: start_datetime = _get_default_start_datetime(end_datetime) for target_point_in_time in _iter_15_minute_marks_in_range(start_datetime, end_datetime): if period == 'hourly' and target_point_in_time.minute != 0: # Don't care if not on hour continue keys = _make_all_notification_view_keys(period, target_point_in_time) for key in keys: for result in ReportNotification.view( ""reportconfig/all_notifications"", reduce=False, include_docs=False, **key ).all(): if period == 'hourly': try: hour = int(result['value'].get('hour')) stop_hour = int(result['value'].get('stop_hour')) if hour <= target_point_in_time.hour <= stop_hour: yield result['id'] except (TypeError, ValueError): yield result['id'] else: yield result['id'] " 6851,"def sync_events_from_google_calendar(g_calendar, method=None): """""" Syncs Events from Google Calendar in Framework Calendar. Google Calendar returns nextSyncToken when all the events in Google Calendar are fetched. 
nextSyncToken is returned at the very last page https://developers.google.com/calendar/v3/sync """""" google_calendar, account = get_google_calendar_object(g_calendar) if not account.pull_from_google_calendar: return sync_token = account.get_password(fieldname=""next_sync_token"", raise_exception=False) or None events = frappe._dict() results = [] while True: try: # API Response listed at EOF events = google_calendar.events().list(calendarId=account.google_calendar_id, maxResults=2000, pageToken=events.get(""nextPageToken""), singleEvents=False, showDeleted=True, syncToken=sync_token).execute() except HttpError as err: msg = _(""Google Calendar - Could not fetch event from Google Calendar, error code {0}."").format(err.resp.status) if err.resp.status == 410: set_encrypted_password(""Google Calendar"", account.name, """", ""next_sync_token"") frappe.db.commit() msg += _(' Sync token was invalid and has been resetted, Retry syncing.') frappe.msgprint(msg, title='Invalid Sync Token', indicator='blue') else: frappe.throw(msg) for event in events.get(""items"", []): results.append(event) if not events.get(""nextPageToken""): if events.get(""nextSyncToken""): account.next_sync_token = events.get(""nextSyncToken"") account.save() break for idx, event in enumerate(results): frappe.publish_realtime(""import_google_calendar"", dict(progress=idx+1, total=len(results)), user=frappe.session.user) # If Google Calendar Event if confirmed, then create an Event if event.get(""status"") == ""confirmed"": recurrence = None if event.get(""recurrence""): try: recurrence = event.get(""recurrence"")[0] except IndexError: pass if not frappe.db.exists(""Event"", {""google_calendar_event_id"": event.get(""id"")}): insert_event_to_calendar(account, event, recurrence) else: update_event_in_calendar(account, event, recurrence) elif event.get(""status"") == ""cancelled"": # If any synced Google Calendar Event is cancelled, then close the Event frappe.db.set_value(""Event"", {""google_calendar_id"": account.google_calendar_id, ""google_calendar_event_id"": event.get(""id"")}, ""status"", ""Closed"") frappe.get_doc({ ""doctype"": ""Comment"", ""comment_type"": ""Info"", ""reference_doctype"": ""Event"", ""reference_name"": frappe.db.get_value(""Event"", {""google_calendar_id"": account.google_calendar_id, ""google_calendar_event_id"": event.get(""id"")}, ""name""), ""content"": "" - Event deleted from Google Calendar."", }).insert(ignore_permissions=True) else: pass if not results: return _(""No Google Calendar Event to sync."") elif len(results) == 1: return _(""1 Google Calendar Event synced."") else: return _(""{0} Google Calendar Events synced."").format(len(results)) ","def sync_events_from_google_calendar(g_calendar, method=None): """""" Syncs Events from Google Calendar in Framework Calendar. Google Calendar returns nextSyncToken when all the events in Google Calendar are fetched. 
nextSyncToken is returned at the very last page https://developers.google.com/calendar/v3/sync """""" google_calendar, account = get_google_calendar_object(g_calendar) if not account.pull_from_google_calendar: return sync_token = account.get_password(fieldname=""next_sync_token"", raise_exception=False) or None events = frappe._dict() results = [] while True: try: # API Response listed at EOF events = google_calendar.events().list(calendarId=account.google_calendar_id, maxResults=2000, pageToken=events.get(""nextPageToken""), singleEvents=False, showDeleted=True, syncToken=sync_token).execute() except HttpError as err: msg = _(""Google Calendar - Could not fetch event from Google Calendar, error code {0}."").format(err.resp.status) if err.resp.status == 410: set_encrypted_password(""Google Calendar"", account.name, """", ""next_sync_token"") frappe.db.commit() msg += ' ' + _('Sync token was invalid and has been resetted, Retry syncing.') frappe.msgprint(msg, title='Invalid Sync Token', indicator='blue') else: frappe.throw(msg) for event in events.get(""items"", []): results.append(event) if not events.get(""nextPageToken""): if events.get(""nextSyncToken""): account.next_sync_token = events.get(""nextSyncToken"") account.save() break for idx, event in enumerate(results): frappe.publish_realtime(""import_google_calendar"", dict(progress=idx+1, total=len(results)), user=frappe.session.user) # If Google Calendar Event if confirmed, then create an Event if event.get(""status"") == ""confirmed"": recurrence = None if event.get(""recurrence""): try: recurrence = event.get(""recurrence"")[0] except IndexError: pass if not frappe.db.exists(""Event"", {""google_calendar_event_id"": event.get(""id"")}): insert_event_to_calendar(account, event, recurrence) else: update_event_in_calendar(account, event, recurrence) elif event.get(""status"") == ""cancelled"": # If any synced Google Calendar Event is cancelled, then close the Event frappe.db.set_value(""Event"", {""google_calendar_id"": account.google_calendar_id, ""google_calendar_event_id"": event.get(""id"")}, ""status"", ""Closed"") frappe.get_doc({ ""doctype"": ""Comment"", ""comment_type"": ""Info"", ""reference_doctype"": ""Event"", ""reference_name"": frappe.db.get_value(""Event"", {""google_calendar_id"": account.google_calendar_id, ""google_calendar_event_id"": event.get(""id"")}, ""name""), ""content"": "" - Event deleted from Google Calendar."", }).insert(ignore_permissions=True) else: pass if not results: return _(""No Google Calendar Event to sync."") elif len(results) == 1: return _(""1 Google Calendar Event synced."") else: return _(""{0} Google Calendar Events synced."").format(len(results)) " 16377,"def validate_requirements_format(integration: Integration) -> bool: """"""Validate requirements format. Returns if valid. 
"""""" start_errors = len(integration.errors) for req in integration.requirements: if "" "" in req: integration.add_error( ""requirements"", f'Requirement ""{req}"" contains a space', ) continue pkg, sep, version = PACKAGE_REGEX.match(req).groups() if integration.core and sep != ""=="": integration.add_error( ""requirements"", f'Requirement {req} need to be pinned ""=="".', ) continue if not version: continue for part in version.split("",""): version_part = PIP_VERSION_RANGE_SEPERATOR.match(part) if ( version_part and AwesomeVersion(version_part.group(2)).strategy == AwesomeVersionStrategy.UNKNOWN ): integration.add_error( ""requirements"", f""Unable to parse package version ({version}) for {pkg}."", ) continue return len(integration.errors) == start_errors ","def validate_requirements_format(integration: Integration) -> bool: """"""Validate requirements format. Returns if valid. """""" start_errors = len(integration.errors) for req in integration.requirements: if "" "" in req: integration.add_error( ""requirements"", f'Requirement ""{req}"" contains a space', ) continue pkg, sep, version = PACKAGE_REGEX.match(req).groups() if integration.core and sep != ""=="": integration.add_error( ""requirements"", f'Requirement {req} need to be pinned ""=="".', ) continue if not version: continue for part in version.split("",""): version_part = PIP_VERSION_RANGE_SEPARATOR.match(part) if ( version_part and AwesomeVersion(version_part.group(2)).strategy == AwesomeVersionStrategy.UNKNOWN ): integration.add_error( ""requirements"", f""Unable to parse package version ({version}) for {pkg}."", ) continue return len(integration.errors) == start_errors " 58824,"def dot(x, y, out=None): """"""Computes the dot product of x and y."""""" dtype = x.dtype.char if dtype == 'f': func = cublas.sdot elif dtype == 'd': func = cublas.ddot elif dtype in 'FD': raise TypeError('Use dotu() or dotc() for complex dtype') else: raise TypeError('invalid dtype') _check_two_vectors(x, y) handle = device.get_cublas_handle() result_dtype = dtype result_ptr, result, mode = _setup_result_ptr(handle, out, result_dtype) func(handle, x.size, x.data.ptr, 1, y.data.ptr, 1, result_ptr) cublas.setPointerMode(handle, mode) if out is None: out = result elif out.dtype != result_dtype: out[...] = result return out ","def dot(x, y, out=None): """"""Computes the dot product of x and y."""""" dtype = x.dtype.char if dtype == 'f': func = cublas.sdot elif dtype == 'd': func = cublas.ddot elif dtype in 'FD': raise TypeError('Use dotu() or dotc() for complex dtype') else: raise TypeError('invalid dtype') _check_two_vectors(x, y) handle = device.get_cublas_handle() result_dtype = dtype result_ptr, result, orig_mode = _setup_result_ptr(handle, out, result_dtype) func(handle, x.size, x.data.ptr, 1, y.data.ptr, 1, result_ptr) cublas.setPointerMode(handle, orig_mode) if out is None: out = result elif out.dtype != result_dtype: out[...] 
= result return out " 27777,"def prepare_release_pr(base_branch: str, is_major: bool, token: str) -> None: print() print(f""Precessing release for branch {Fore.CYAN}{base_branch}"") check_call([""git"", ""checkout"", f""origin/{base_branch}""]) try: version = find_next_version(base_branch, is_major) except InvalidFeatureRelease as e: print(f""{Fore.RED}{e}"") raise SystemExit(1) print(f""Version: {Fore.CYAN}{version}"") release_branch = f""release-{version}"" run( [""git"", ""config"", ""user.name"", ""pytest bot""], text=True, check=True, capture_output=True, ) run( [""git"", ""config"", ""user.email"", ""pytestbot@gmail.com""], text=True, check=True, capture_output=True, ) run( [""git"", ""checkout"", ""-b"", release_branch, f""origin/{base_branch}""], text=True, check=True, capture_output=True, ) print(f""Branch {Fore.CYAN}{release_branch}{Fore.RESET} created."") # important to use tox here because we have changed branches, so dependencies # might have changed as well cmdline = [""tox"", ""-e"", ""release"", ""--"", version, ""--skip-check-links""] print(""Running"", "" "".join(cmdline)) run( cmdline, text=True, check=True, capture_output=True, ) oauth_url = f""https://{token}:x-oauth-basic@github.com/{SLUG}.git"" run( [""git"", ""push"", oauth_url, f""HEAD:{release_branch}"", ""--force""], text=True, check=True, capture_output=True, ) print(f""Branch {Fore.CYAN}{release_branch}{Fore.RESET} pushed."") body = PR_BODY.format(version=version) repo = login(token) pr = repo.create_pull( f""Prepare release {version}"", base=base_branch, head=release_branch, body=body, ) print(f""Pull request {Fore.CYAN}{pr.url}{Fore.RESET} created."") ","def prepare_release_pr(base_branch: str, is_major: bool, token: str) -> None: print() print(f""Processing release for branch {Fore.CYAN}{base_branch}"") check_call([""git"", ""checkout"", f""origin/{base_branch}""]) try: version = find_next_version(base_branch, is_major) except InvalidFeatureRelease as e: print(f""{Fore.RED}{e}"") raise SystemExit(1) print(f""Version: {Fore.CYAN}{version}"") release_branch = f""release-{version}"" run( [""git"", ""config"", ""user.name"", ""pytest bot""], text=True, check=True, capture_output=True, ) run( [""git"", ""config"", ""user.email"", ""pytestbot@gmail.com""], text=True, check=True, capture_output=True, ) run( [""git"", ""checkout"", ""-b"", release_branch, f""origin/{base_branch}""], text=True, check=True, capture_output=True, ) print(f""Branch {Fore.CYAN}{release_branch}{Fore.RESET} created."") # important to use tox here because we have changed branches, so dependencies # might have changed as well cmdline = [""tox"", ""-e"", ""release"", ""--"", version, ""--skip-check-links""] print(""Running"", "" "".join(cmdline)) run( cmdline, text=True, check=True, capture_output=True, ) oauth_url = f""https://{token}:x-oauth-basic@github.com/{SLUG}.git"" run( [""git"", ""push"", oauth_url, f""HEAD:{release_branch}"", ""--force""], text=True, check=True, capture_output=True, ) print(f""Branch {Fore.CYAN}{release_branch}{Fore.RESET} pushed."") body = PR_BODY.format(version=version) repo = login(token) pr = repo.create_pull( f""Prepare release {version}"", base=base_branch, head=release_branch, body=body, ) print(f""Pull request {Fore.CYAN}{pr.url}{Fore.RESET} created."") " 43717,"def sample(op): r""""""Sample from the supplied observable, with the number of shots determined from the ``dev.shots`` attribute of the corresponding device. The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable. 
The probability of drawing eigenvalue :math:`\lambda_i` is given by :math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle` is the corresponding basis state from the observable’s eigenbasis. **Example:** .. code-block:: python3 dev = qml.device(""default.qubit"", wires=2, shots=4) @qml.qnode(dev) def circuit(x): qml.RX(x, wires=0) qml.Hadamard(wires=1) qml.CNOT(wires=[0, 1]) return qml.sample(qml.PauliY(0)) Executing this QNode: >>> circuit(0.5) array([ 1., 1., 1., -1.]) Args: op (Observable): a quantum observable object Raises: QuantumFunctionError: `op` is not an instance of :class:`~.Observable` """""" if not isinstance(op, Observable): raise QuantumFunctionError( ""{} is not an observable: cannot be used with sample"".format(op.name) ) return MeasurementProcess(Sample, obs=op) ","def sample(op): r""""""Sample from the supplied observable, with the number of shots determined from the ``dev.shots`` attribute of the corresponding device. The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable. The probability of drawing eigenvalue :math:`\lambda_i` is given by :math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle` is the corresponding basis state from the observable's eigenbasis. **Example:** .. code-block:: python3 dev = qml.device(""default.qubit"", wires=2, shots=4) @qml.qnode(dev) def circuit(x): qml.RX(x, wires=0) qml.Hadamard(wires=1) qml.CNOT(wires=[0, 1]) return qml.sample(qml.PauliY(0)) Executing this QNode: >>> circuit(0.5) array([ 1., 1., 1., -1.]) Args: op (Observable): a quantum observable object Raises: QuantumFunctionError: `op` is not an instance of :class:`~.Observable` """""" if not isinstance(op, Observable): raise QuantumFunctionError( ""{} is not an observable: cannot be used with sample"".format(op.name) ) return MeasurementProcess(Sample, obs=op) " 49809,"def block_safe(block): """""" Check if the block is safe to work with. A BlockUserData must have been set on the block while it was known safe. If an editor is cleared by editor.clear() or editor.set_text() for example, all the old blocks will continue to report block.isValid() == True but will raise a Segmentation Fault on almost all methods. One way to check is that the userData is reset to None or QTextBlockUserData. So if a block is known to have setUserData to BlockUserData, this fact can be used to check the block. """""" return block.isValid() and isinstance(block.userData(), BlockUserData) ","def block_safe(block): """""" Check if the block is safe to work with. A BlockUserData must have been set on the block while it was known safe. If an editor is cleared by editor.clear() or editor.set_text() for example, all the old blocks will continue to report block.isValid() == True but will raise a Segmentation Fault on almost all methods. One way to check if a block is valid is that the userData is reset to None or QTextBlockUserData. So if a block is known to have setUserData to BlockUserData, this fact can be used to check the block. 
"""""" return block.isValid() and isinstance(block.userData(), BlockUserData) " 24415,"def show_report(report): if report['failed']: echo_failure(""FAILED"") else: echo_success(""Profile successfuly validated"") for display_func, message in report['messages']: display_func(message)","def show_report(report): if report['failed']: echo_failure(""FAILED"") else: echo_success(""Profile successfuly validated"") for display_func, message in report['messages']: display_func(message) " 5200,"def test_addfont_as_path(): """"""Smoke test that addfont() accepts pathlib.Path."""""" font_test_file = 'mpltest.ttf' path = Path(__file__).parent / font_test_file try: fontManager.addfont(path) added, = [font for font in fontManager.ttflist if font.fname.endswith('mpltest.ttf')] fontManager.ttflist.remove(added) finally: to_remove = [font for font in fontManager.ttflist if font.fname.endswith('mpltest.ttf')] for font in to_remove: fontManager.ttflist.remove(font) ","def test_addfont_as_path(): """"""Smoke test that addfont() accepts pathlib.Path."""""" font_test_file = 'mpltest.ttf' path = Path(__file__).parent / font_test_file try: fontManager.addfont(path) added, = [font for font in fontManager.ttflist if font.fname.endswith('mpltest.ttf')] fontManager.ttflist.remove(added) finally: to_remove = [font for font in fontManager.ttflist if font.fname.endswith(font_test_file)] for font in to_remove: fontManager.ttflist.remove(font) " 59177,"def chown(path, user=None, group=None, dir_fd=None, follow_symlinks=True): """"""Change owner user and group of the given path. user and group can be the uid/gid or the user/group names, and in that case, they are converted to their respective uid/gid. If dir_fd is set, it should be an open file descriptor to a directory. If follow_symlinks is set to False and the last element of the path is a symbolic link, chown will modify the link itself and not the file being referenced by the link. """""" if user is None and group is None: raise ValueError(""user and/or group must be set"") _user = user _group = group # -1 means don't change it if user is None: _user = -1 # user can either be an int (the uid) or a string (the system username) elif isinstance(user, str): _user = _get_uid(user) if _user is None: raise LookupError(""no such user: {!r}"".format(user)) if group is None: _group = -1 elif not isinstance(group, int): _group = _get_gid(group) if _group is None: raise LookupError(""no such group: {!r}"".format(group)) os.chown(path, _user, _group, dir_fd=dir_fd, follow_symlinks=follow_symlinks) ","def chown(path, user=None, group=None, dir_fd=None, follow_symlinks=True): """"""Change owner user and group of the given path. user and group can be the uid/gid or the user/group names, and in that case, they are converted to their respective uid/gid. If dir_fd is set, it should be an open file descriptor to the directory to be used as the root of *path* if it is relative. If follow_symlinks is set to False and the last element of the path is a symbolic link, chown will modify the link itself and not the file being referenced by the link. 
"""""" if user is None and group is None: raise ValueError(""user and/or group must be set"") _user = user _group = group # -1 means don't change it if user is None: _user = -1 # user can either be an int (the uid) or a string (the system username) elif isinstance(user, str): _user = _get_uid(user) if _user is None: raise LookupError(""no such user: {!r}"".format(user)) if group is None: _group = -1 elif not isinstance(group, int): _group = _get_gid(group) if _group is None: raise LookupError(""no such group: {!r}"".format(group)) os.chown(path, _user, _group, dir_fd=dir_fd, follow_symlinks=follow_symlinks) " 48633,"def sd_notify(message: str) -> None: notify_socket = os.environ.get('NOTIFY_SOCKET') if not notify_socket: return if notify_socket[0] == '@': notify_socket = '\0' + notify_socket[1:] sd_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: sd_sock.connect(notify_socket) sd_sock.sendall(message.encode()) except Exception as e: logger.info('Could not send systemd notification: %s', e) finally: sd_sock.close() ","def sd_notify(message: str) -> None: notify_socket = os.environ.get('NOTIFY_SOCKET') if not notify_socket: return if notify_socket[0] == '@': notify_socket = '\0' + notify_socket[1:] with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sd_sock: try: sd_sock.connect(notify_socket) sd_sock.sendall(message.encode()) except Exception as e: logger.info('Could not send systemd notification: %s', e) " 5755,"def get_matfile_version(file_name, appendmat=True): """""" Return major, minor tuple depending on apparent mat file type Where: #. 0,x -> version 4 format mat files #. 1,x -> version 5 format mat files #. 2,x -> version 7.3 format mat files (HDF format) Parameters ---------- file_name : str Name of the mat file (do not need .mat extension if appendmat==True). Can also pass open file-like object. appendmat : bool, optional True to append the .mat extension to the end of the given filename, if not already present. Returns ------- major_version : {0, 1, 2} major MATLAB File format version minor_version : int minor MATLAB file format version Raises ------ MatReadError If the file is empty. ValueError The matfile version is unknown. Notes ----- Has the side effect of setting the file read pointer to 0 """""" from .mio import _open_file_context with _open_file_context(file_name, appendmat=appendmat) as fileobj: return _get_matfile_version(fileobj) ","def get_matfile_version(file_name, *, appendmat=True): """""" Return major, minor tuple depending on apparent mat file type Where: #. 0,x -> version 4 format mat files #. 1,x -> version 5 format mat files #. 2,x -> version 7.3 format mat files (HDF format) Parameters ---------- file_name : str Name of the mat file (do not need .mat extension if appendmat==True). Can also pass open file-like object. appendmat : bool, optional True to append the .mat extension to the end of the given filename, if not already present. Returns ------- major_version : {0, 1, 2} major MATLAB File format version minor_version : int minor MATLAB file format version Raises ------ MatReadError If the file is empty. ValueError The matfile version is unknown. 
Notes ----- Has the side effect of setting the file read pointer to 0 """""" from .mio import _open_file_context with _open_file_context(file_name, appendmat=appendmat) as fileobj: return _get_matfile_version(fileobj) " 30453,"def get_self_deployed_token(): if not (AUTH_ID and SELF_TENANT_ID and APP_SECRET): return_error('You must provide the Tenant ID, Application ID and Client Secret.') integration_context = demisto.getIntegrationContext() if integration_context and integration_context['token_expiration_time']: token_expiration_time = integration_context['token_expiration_time'] now = int(time.time()) if token_expiration_time < now: return integration_context['token'] url = 'https://login.windows.net/{}/oauth2/token'.format(SELF_TENANT_ID) resource_app_id_uri = 'https://api.securitycenter.windows.com' data = { 'resource': resource_app_id_uri, 'client_id': APP_ID, 'client_secret': APP_SECRET, 'grant_type': 'client_credentials' } response = requests.post(url, data, verify=USE_SSL) body = response.json() if response.status_code != 200: return_error('Error in Microsoft authorization: {}'.format(str(body))) demisto.setIntegrationContext({ 'token_expiration_time': body['expires_on'], 'token': body['access_token'] }) return body['access_token'] ","def get_self_deployed_token(): if not (AUTH_ID and SELF_TENANT_ID and APP_SECRET): return_error('You must provide the Tenant ID, Application ID and Client Secret.') integration_context = demisto.getIntegrationContext() if integration_context and integration_context['token_expiration_time']: token_expiration_time = integration_context['token_expiration_time'] now = int(time.time()) if token_expiration_time < now: return integration_context['token'] url = 'https://login.windows.net/{}/oauth2/token'.format(SELF_TENANT_ID) resource_app_id_uri = 'https://api.securitycenter.windows.com' data = { 'resource': resource_app_id_uri, 'client_id': APP_ID, 'client_secret': APP_SECRET, 'grant_type': 'client_credentials' } response = requests.post(url, data, verify=USE_SSL) body = response.json() if response.status_code != 200: return_error('Error in Microsoft authorization: {}'.format(str(body))) demisto.setIntegrationContext({ 'token_expiration_time': body.get('expires_on', 3595), 'token': body['access_token'] }) return body['access_token'] " 6578,"def execute(): company = frappe.get_all('Company', filters = {'country': 'India'}) if not company: return irn_cancelled_field = frappe.db.exists('Custom Field', {'dt': 'Sales Invoice', 'fieldname': 'irn_cancelled'}) if irn_cancelled_field: frappe.db.set_value('Custom Field', irn_cancelled_field, 'depends_on', '') frappe.db.set_value('Custom Field', irn_cancelled_field, 'read_only', 0)","def execute(): company = frappe.get_all('Company', filters = {'country': 'India'}) if not company: return irn_cancelled_field = frappe.db.exists('Custom Field', {'dt': 'Sales Invoice', 'fieldname': 'irn_cancelled'}) if irn_cancelled_field: frappe.db.set_value('Custom Field', irn_cancelled_field, 'depends_on', 'eval: doc.irn') frappe.db.set_value('Custom Field', irn_cancelled_field, 'read_only', 0)" 34037,"def test_run(ray_start_4_cpus): """"""Tests that Train can be run without any specific backends."""""" num_workers = 2 key = ""value"" value = 1 config = TestConfig() def train_func(): checkpoint = train.load_checkpoint() train.report(**checkpoint) train.save_checkpoint(**checkpoint) return checkpoint[key] checkpoint = Checkpoint.from_dict( { # this would be set during checkpoint saving ""_current_checkpoint_id"": 1, key: value, } ) 
trainer = DataParallelTrainer( train_func, backend_config=config, resume_from_checkpoint=checkpoint, scaling_config=dict(num_workers=num_workers), ) results = trainer.fit() assert results.checkpoint ","def test_run(ray_start_4_cpus): """"""Tests that Train can be run without any specific backends."""""" num_workers = 2 key = ""value"" value = 1 config = TestConfig() def train_func(): checkpoint = train.load_checkpoint() train.report(**checkpoint) train.save_checkpoint(**checkpoint) return checkpoint[key] checkpoint = Checkpoint.from_dict( { # this would be set during checkpoint saving ""_current_checkpoint_id"": 1, key: value, } ) trainer = DataParallelTrainer( train_func, backend_config=config, resume_from_checkpoint=checkpoint, scaling_config=dict(num_workers=num_workers), ) results = trainer.fit() assert results.checkpoint == checkpoint " 7064,"def runN_remover(workflow_id: str) -> str: if re.findall(r'(.*)\/run\d+$', workflow_id): return re.findall(r'(.*)\/run\d+$', workflow_id)[0] else: return workflow_id ","def runN_remover(workflow_id: str) -> str: return re.sub(rf'{re.escape(os.sep)}run\d+$', '', workflow_id) " 8908,"def subreddit_info(bot, trigger, match, commanded=False): """"""Shows information about the given subreddit"""""" match_lower = match.lower() if match_lower in ['all', 'popular']: message = ('[REDDIT] {link}{nsfw} | {public_description}') nsfw = ' ' + bold(color('[Possible NSFW]', colors.ORANGE)) link = ""https://reddit.com/r/"" + match_lower public_description = '' if match_lower == 'all': public_description = ('Today\'s top content from hundreds of ' 'thousands of Reddit communities.') elif match_lower == 'popular': public_description = ('The top trending content from some of ' 'Reddit\'s most popular communities') message = message.format( link=link, nsfw=nsfw, public_description=public_description) bot.say(message) return plugin.NOLIMIT r = bot.memory['reddit_praw'] try: r.subreddits.search_by_name(match, exact=True) except prawcore.exceptions.NotFound: if commanded: bot.say('No such subreddit.') # Fail silently if it wasn't an explicit command. return plugin.NOLIMIT try: s = r.subreddit(match) s.subreddit_type except prawcore.exceptions.Forbidden: bot.reply(""r/"" + match + "" appears to be a private subreddit!"") return plugin.NOLIMIT except prawcore.exceptions.NotFound: bot.reply(""r/"" + match + "" appears to be a banned subreddit!"") return plugin.NOLIMIT link = ""https://reddit.com/r/"" + s.display_name created = get_time_created(bot, trigger, s.created_utc) message = ('{link}{nsfw} | {subscribers} subscribers | ' 'Created at {created} | {public_description}') nsfw = '' if s.over18: nsfw += ' ' + bold(color('[NSFW]', colors.RED)) sfw = bot.db.get_channel_value(trigger.sender, 'sfw') if sfw: link = '(link hidden)' bot.kick( trigger.nick, trigger.sender, 'Linking to NSFW content in a SFW channel.' 
) message = message.format( link=link, nsfw=nsfw, subscribers='{:,}'.format(s.subscribers), created=created, public_description=s.public_description) bot.say(message) ","def subreddit_info(bot, trigger, match, commanded=False): """"""Shows information about the given subreddit"""""" match_lower = match.lower() if match_lower in ['all', 'popular']: message = ('[REDDIT] {link}{nsfw} | {public_description}') nsfw = ' ' + bold(color('[Possible NSFW]', colors.ORANGE)) link = ""https://reddit.com/r/"" + match_lower public_description = '' if match_lower == 'all': public_description = ('Today\'s top content from hundreds of ' 'thousands of Reddit communities.') elif match_lower == 'popular': public_description = ('The top trending content from some of ' 'Reddit\'s most popular communities') message = message.format( link=link, nsfw=nsfw, public_description=public_description) bot.say(message) return plugin.NOLIMIT r = bot.memory['reddit_praw'] try: r.subreddits.search_by_name(match, exact=True) except prawcore.exceptions.NotFound: if commanded: bot.reply('No such subreddit.') # Fail silently if it wasn't an explicit command. return plugin.NOLIMIT try: s = r.subreddit(match) s.subreddit_type except prawcore.exceptions.Forbidden: bot.reply(""r/"" + match + "" appears to be a private subreddit!"") return plugin.NOLIMIT except prawcore.exceptions.NotFound: bot.reply(""r/"" + match + "" appears to be a banned subreddit!"") return plugin.NOLIMIT link = ""https://reddit.com/r/"" + s.display_name created = get_time_created(bot, trigger, s.created_utc) message = ('{link}{nsfw} | {subscribers} subscribers | ' 'Created at {created} | {public_description}') nsfw = '' if s.over18: nsfw += ' ' + bold(color('[NSFW]', colors.RED)) sfw = bot.db.get_channel_value(trigger.sender, 'sfw') if sfw: link = '(link hidden)' bot.kick( trigger.nick, trigger.sender, 'Linking to NSFW content in a SFW channel.' ) message = message.format( link=link, nsfw=nsfw, subscribers='{:,}'.format(s.subscribers), created=created, public_description=s.public_description) bot.say(message) " 47279,"def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, null_score_diff_threshold: float = 0.0, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, ): """""" Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. 
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example (note that the score of the null answer for an example giving several features is the minimum of the scores for the null answer on each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful when :obj:`version_2_with_negative` is :obj:`True`. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) """""" if not len(predictions) == 2: raise ValueError(""`predictions` should be a tuple with two elements (start_logits, end_logits)."") all_start_logits, all_end_logits = predictions if not len(predictions[0]) == len(features): raise ValueError(f""Got {len(predictions[0])} predictions and {len(features)} features."") # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples[""id""])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature[""example_id""]]].append(i) # The dictionaries we have to fill. all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() if version_2_with_negative: scores_diff_json = collections.OrderedDict() # Logging. logger.setLevel(log_level) logger.info(f""Post-processing {len(examples)} example predictions split into {len(features)} features."") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_prediction = None prelim_predictions = [] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index][""offset_mapping""] # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context # available in the current feature. token_is_max_context = features[feature_index].get(""token_is_max_context"", None) # Update minimum null prediction. feature_null_score = start_logits[0] + end_logits[0] if min_null_prediction is None or min_null_prediction[""score""] > feature_null_score: min_null_prediction = { ""offsets"": (0, 0), ""score"": feature_null_score, ""start_logit"": start_logits[0], ""end_logit"": end_logits[0], } # Go through all possibilities for the `n_best_size` greater start and end logits. 
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue # Don't consider answer that don't have the maximum context available (if such information is # provided). if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): continue prelim_predictions.append( { ""offsets"": (offset_mapping[start_index][0], offset_mapping[end_index][1]), ""score"": start_logits[start_index] + end_logits[end_index], ""start_logit"": start_logits[start_index], ""end_logit"": end_logits[end_index], } ) if version_2_with_negative: # Add the minimum null prediction prelim_predictions.append(min_null_prediction) null_score = min_null_prediction[""score""] # Only keep the best `n_best_size` predictions. predictions = sorted(prelim_predictions, key=lambda x: x[""score""], reverse=True)[:n_best_size] # Add back the minimum null prediction if it was removed because of its low score. if version_2_with_negative and not any(p[""offsets""] == (0, 0) for p in predictions): predictions.append(min_null_prediction) # Use the offsets to gather the answer text in the original context. context = example[""context""] for pred in predictions: offsets = pred.pop(""offsets"") pred[""text""] = context[offsets[0] : offsets[1]] # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. if len(predictions) == 0 or (len(predictions) == 1 and predictions[0][""text""] == """"): predictions.insert(0, {""text"": ""empty"", ""start_logit"": 0.0, ""end_logit"": 0.0, ""score"": 0.0}) # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using # the LogSumExp trick). scores = np.array([pred.pop(""score"") for pred in predictions]) exp_scores = np.exp(scores - np.max(scores)) probs = exp_scores / exp_scores.sum() # Include the probabilities in our predictions. for prob, pred in zip(probs, predictions): pred[""probability""] = prob # Pick the best prediction. If the null answer is not possible, this is easy. if not version_2_with_negative: all_predictions[example[""id""]] = predictions[0][""text""] else: # Otherwise we first need to find the best non-empty prediction. i = 0 while predictions[i][""text""] == """": i += 1 best_non_null_pred = predictions[i] # Then we compare to the null prediction using the threshold. score_diff = null_score - best_non_null_pred[""start_logit""] - best_non_null_pred[""end_logit""] scores_diff_json[example[""id""]] = float(score_diff) # To be JSON-serializable. if score_diff > null_score_diff_threshold: all_predictions[example[""id""]] = """" else: all_predictions[example[""id""]] = best_non_null_pred[""text""] # Make `predictions` JSON-serializable by casting np.float back to float. 
all_nbest_json[example[""id""]] = [ {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} for pred in predictions ] # If we have an output_dir, let's save all those dicts. if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f""{output_dir} is not a directory."") prediction_file = os.path.join( output_dir, ""predictions.json"" if prefix is None else f""{prefix}_predictions.json"" ) nbest_file = os.path.join( output_dir, ""nbest_predictions.json"" if prefix is None else f""{prefix}_nbest_predictions.json"" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, ""null_odds.json"" if prefix is None else f""{prefix}_null_odds.json"" ) logger.info(f""Saving predictions to {prediction_file}."") with open(prediction_file, ""w"") as writer: writer.write(json.dumps(all_predictions, indent=4) + ""\n"") logger.info(f""Saving nbest_preds to {nbest_file}."") with open(nbest_file, ""w"") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + ""\n"") if version_2_with_negative: logger.info(f""Saving null_odds to {null_odds_file}."") with open(null_odds_file, ""w"") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + ""\n"") return all_predictions ","def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, null_score_diff_threshold: float = 0.0, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, ): """""" Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example (note that the score of the null answer for an example giving several features is the minimum of the scores for the null answer on each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful when :obj:`version_2_with_negative` is :obj:`True`. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. 
prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) """""" if not len(predictions) == 2: raise ValueError(""`predictions` should be a tuple with two elements (start_logits, end_logits)."") all_start_logits, all_end_logits = predictions if len(predictions[0]) != len(features): raise ValueError(f""Got {len(predictions[0])} predictions and {len(features)} features."") # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples[""id""])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature[""example_id""]]].append(i) # The dictionaries we have to fill. all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() if version_2_with_negative: scores_diff_json = collections.OrderedDict() # Logging. logger.setLevel(log_level) logger.info(f""Post-processing {len(examples)} example predictions split into {len(features)} features."") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_prediction = None prelim_predictions = [] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index][""offset_mapping""] # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context # available in the current feature. token_is_max_context = features[feature_index].get(""token_is_max_context"", None) # Update minimum null prediction. feature_null_score = start_logits[0] + end_logits[0] if min_null_prediction is None or min_null_prediction[""score""] > feature_null_score: min_null_prediction = { ""offsets"": (0, 0), ""score"": feature_null_score, ""start_logit"": start_logits[0], ""end_logit"": end_logits[0], } # Go through all possibilities for the `n_best_size` greater start and end logits. start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue # Don't consider answer that don't have the maximum context available (if such information is # provided). 
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): continue prelim_predictions.append( { ""offsets"": (offset_mapping[start_index][0], offset_mapping[end_index][1]), ""score"": start_logits[start_index] + end_logits[end_index], ""start_logit"": start_logits[start_index], ""end_logit"": end_logits[end_index], } ) if version_2_with_negative: # Add the minimum null prediction prelim_predictions.append(min_null_prediction) null_score = min_null_prediction[""score""] # Only keep the best `n_best_size` predictions. predictions = sorted(prelim_predictions, key=lambda x: x[""score""], reverse=True)[:n_best_size] # Add back the minimum null prediction if it was removed because of its low score. if version_2_with_negative and not any(p[""offsets""] == (0, 0) for p in predictions): predictions.append(min_null_prediction) # Use the offsets to gather the answer text in the original context. context = example[""context""] for pred in predictions: offsets = pred.pop(""offsets"") pred[""text""] = context[offsets[0] : offsets[1]] # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. if len(predictions) == 0 or (len(predictions) == 1 and predictions[0][""text""] == """"): predictions.insert(0, {""text"": ""empty"", ""start_logit"": 0.0, ""end_logit"": 0.0, ""score"": 0.0}) # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using # the LogSumExp trick). scores = np.array([pred.pop(""score"") for pred in predictions]) exp_scores = np.exp(scores - np.max(scores)) probs = exp_scores / exp_scores.sum() # Include the probabilities in our predictions. for prob, pred in zip(probs, predictions): pred[""probability""] = prob # Pick the best prediction. If the null answer is not possible, this is easy. if not version_2_with_negative: all_predictions[example[""id""]] = predictions[0][""text""] else: # Otherwise we first need to find the best non-empty prediction. i = 0 while predictions[i][""text""] == """": i += 1 best_non_null_pred = predictions[i] # Then we compare to the null prediction using the threshold. score_diff = null_score - best_non_null_pred[""start_logit""] - best_non_null_pred[""end_logit""] scores_diff_json[example[""id""]] = float(score_diff) # To be JSON-serializable. if score_diff > null_score_diff_threshold: all_predictions[example[""id""]] = """" else: all_predictions[example[""id""]] = best_non_null_pred[""text""] # Make `predictions` JSON-serializable by casting np.float back to float. all_nbest_json[example[""id""]] = [ {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} for pred in predictions ] # If we have an output_dir, let's save all those dicts. 
if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f""{output_dir} is not a directory."") prediction_file = os.path.join( output_dir, ""predictions.json"" if prefix is None else f""{prefix}_predictions.json"" ) nbest_file = os.path.join( output_dir, ""nbest_predictions.json"" if prefix is None else f""{prefix}_nbest_predictions.json"" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, ""null_odds.json"" if prefix is None else f""{prefix}_null_odds.json"" ) logger.info(f""Saving predictions to {prediction_file}."") with open(prediction_file, ""w"") as writer: writer.write(json.dumps(all_predictions, indent=4) + ""\n"") logger.info(f""Saving nbest_preds to {nbest_file}."") with open(nbest_file, ""w"") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + ""\n"") if version_2_with_negative: logger.info(f""Saving null_odds to {null_odds_file}."") with open(null_odds_file, ""w"") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + ""\n"") return all_predictions " 38938,"def test_bytesize_to(): class Model(BaseModel): size: ByteSize m = Model(size='1GiB') assert pytest.approx(m.size.to('MiB')) == 1024 assert pytest.approx(m.size.to('MB')) == 1073.741824 assert pytest.approx(m.size.to('TiB')) == 0.0009765625 ","def test_bytesize_to(): class Model(BaseModel): size: ByteSize m = Model(size='1GiB') assert m.size.to('MiB') == pytest.approx(1024) assert pytest.approx(m.size.to('MB')) == 1073.741824 assert pytest.approx(m.size.to('TiB')) == 0.0009765625 " 57925,"def main(): LOG('Command to be executed is {}.'.format(demisto.command())) handle_proxy() try: if demisto.command() == 'test-module': demisto.results(validate_snx_api_key()) if demisto.command() == 'ip': ip_command() elif demisto.command() == 'domain': domain_command() elif demisto.command() == 'slashnext-host-reputation': host_reputation_command() elif demisto.command() == 'slashnext-host-report': host_report_command() elif demisto.command() == 'slashnext-host-urls': host_urls_command() elif demisto.command() == 'slashnext-url-reputation': url_reputation_command() elif demisto.command() == 'slashnext-url-scan': url_scan_command() elif demisto.command() == 'slashnext-url-scan-sync': url_scan_sync_command() elif demisto.command() == 'slashnext-scan-report': scan_report_command() elif demisto.command() == 'slashnext-download-screenshot': download_screenshot_command() elif demisto.command() == 'slashnext-download-html': download_html_command() elif demisto.command() == 'slashnext-download-text': download_text_command() elif demisto.command() == 'slashnext-api-quota': api_quota_command() except Exception as e: return_error(str(e)) ","def main(): LOG('Command to be executed is {}.'.format(demisto.command())) handle_proxy() try: if demisto.command() == 'test-module': demisto.results(validate_snx_api_key()) if demisto.command() == 'ip': ip_command() elif demisto.command() == 'domain': domain_command() elif demisto.command() == 'slashnext-host-reputation': host_reputation_command() elif demisto.command() == 'slashnext-host-report': host_report_command() elif demisto.command() == 'slashnext-host-urls': host_urls_command() elif demisto.command() == 'url': url_command() elif demisto.command() == 'slashnext-url-scan': url_scan_command() elif demisto.command() == 'slashnext-url-scan-sync': url_scan_sync_command() elif demisto.command() == 'slashnext-scan-report': scan_report_command() elif demisto.command() == 'slashnext-download-screenshot': download_screenshot_command() elif 
demisto.command() == 'slashnext-download-html': download_html_command() elif demisto.command() == 'slashnext-download-text': download_text_command() elif demisto.command() == 'slashnext-api-quota': api_quota_command() except Exception as e: return_error(str(e)) " 57885,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" args = demisto.args() params = demisto.params() api_key = params.get('apikey') api_key_id = params.get('apikey_id') base_url = urljoin(params['url'], '/public_api/v1') verify_cert = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: nonce = """".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)]) timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000) auth_key = ""%s%s%s"" % (api_key, nonce, timestamp) api_key_hash = hashlib.sha256(auth_key.encode(""utf-8"")).hexdigest() headers = { ""x-xdr-timestamp"": str(timestamp), ""x-xdr-nonce"": nonce, ""x-xdr-auth-id"": str(api_key_id), ""Authorization"": api_key_hash } client = Client( base_url=base_url, verify=verify_cert, headers=headers, proxy=proxy) generic_commands = init_generic_commands() built_in_commands = init_built_in_commands() if command in generic_commands: return_results(generic_commands[command](client, args)) elif command in built_in_commands: return_results(get_built_in_query_results_polling_command(client, args)) else: raise NotImplementedError(f'Command {command} does not exist.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError: {str(e)}') finally: get_integration_context().clear() ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" args = demisto.args() params = demisto.params() api_key = params.get('apikey') api_key_id = params.get('apikey_id') base_url = urljoin(params['url'], '/public_api/v1') verify_cert = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: nonce = """".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)]) timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000) auth_key = ""%s%s%s"" % (api_key, nonce, timestamp) api_key_hash = hashlib.sha256(auth_key.encode(""utf-8"")).hexdigest() headers = { ""x-xdr-timestamp"": str(timestamp), ""x-xdr-nonce"": nonce, ""x-xdr-auth-id"": str(api_key_id), ""Authorization"": api_key_hash } client = Client( base_url=base_url, verify=verify_cert, headers=headers, proxy=proxy, ) generic_commands = init_generic_commands() built_in_commands = init_built_in_commands() if command in generic_commands: return_results(generic_commands[command](client, args)) elif command in built_in_commands: return_results(get_built_in_query_results_polling_command(client, args)) else: raise NotImplementedError(f'Command {command} does not exist.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError: {str(e)}') finally: get_integration_context().clear() " 15388,"def test_purge_old_recorder_runs(hass, hass_recorder): """"""Test deleting old recorder runs keeps current run."""""" hass = hass_recorder() _add_test_recorder_runs(hass) # make sure we 
start with 6 states with session_scope(hass=hass) as session: recorder_runs = session.query(RecorderRuns) assert recorder_runs.count() == 7 # run purge_old_data() finished = purge_old_data(hass.data[DATA_INSTANCE], 0, repack=False) assert finished assert recorder_runs.count() == 1 ","def test_purge_old_recorder_runs(hass, hass_recorder): """"""Test deleting old recorder runs keeps current run."""""" hass = hass_recorder() _add_test_recorder_runs(hass) # make sure we start with 7 recorder runs with session_scope(hass=hass) as session: recorder_runs = session.query(RecorderRuns) assert recorder_runs.count() == 7 # run purge_old_data() finished = purge_old_data(hass.data[DATA_INSTANCE], 0, repack=False) assert finished assert recorder_runs.count() == 1 " 17348,"def _validate_axis(data, axis): ndim = data.ndim if not -ndim <= axis < ndim: raise IndexError(f""axis {axis!r} out of bounds [-{ndim!r}, {ndim!r})"") if axis < 0: axis += ndim return axis ","def _validate_axis(data, axis): ndim = data.ndim if not -ndim <= axis < ndim: raise IndexError(f""axis {axis!r} out of bounds [-{ndim}, {ndim})"") if axis < 0: axis += ndim return axis " 34868,"def _convert_dense(insym, keras_layer, symtab): weightList = keras_layer.get_weights() weight = symtab.new_const(weightList[0].transpose([1, 0])) params = {'weight':weight, 'use_bias':False, 'units':weightList[0].shape[1]} if keras_layer.use_bias: params['use_bias'] = True params['bias'] = symtab.new_const(weightList[1]) input_shape = keras_layer.input_shape input_dim = len(input_shape) # In case of RNN dense, input shape will be (1, 1, n) if input_dim > 2: input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0]) if input_dim != 3 and input_shape[0] != input_shape[1] != 1: raise ValueError(""Cannot flatten the inputs with shape."", input_shape, "" for dense."") insym = _sym.squeeze(insym, axis=0) out = _sym.dense(data=insym, **params) # defuse activation if sys.version_info.major < 3: act_type = keras_layer.activation.func_name else: act_type = keras_layer.activation.__name__ if act_type != 'linear': out = _convert_activation(out, act_type, symtab) if input_dim > 2: out = _sym.expand_dims(out, axis=0) return out ","def _convert_dense(insym, keras_layer, symtab): weightList = keras_layer.get_weights() weight = symtab.new_const(weightList[0].transpose([1, 0])) params = {'weight':weight, 'use_bias':False, 'units':weightList[0].shape[1]} if keras_layer.use_bias: params['use_bias'] = True params['bias'] = symtab.new_const(weightList[1]) input_shape = keras_layer.input_shape input_dim = len(input_shape) # In case of RNN dense, input shape will be (1, 1, n) if input_dim > 2: input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0]) if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1: raise ValueError(""Cannot flatten the inputs with shape."", input_shape, "" for dense."") insym = _sym.squeeze(insym, axis=0) out = _sym.dense(data=insym, **params) # defuse activation if sys.version_info.major < 3: act_type = keras_layer.activation.func_name else: act_type = keras_layer.activation.__name__ if act_type != 'linear': out = _convert_activation(out, act_type, symtab) if input_dim > 2: out = _sym.expand_dims(out, axis=0) return out " 13206,"def default_deposit_spam_handling(deposit=None, community=None): """"""Default actions to counter spam detected record."""""" if deposit: user = User.query.get(deposit['_deposit']['owners'][0]) if community: user = community.owner community.description = '--SPAM--' + community.description 
if community.oaiset: db.session.delete(community.oaiset) community.delete() user.active = False delete_user_sessions(user) logout_user() db.session.add(user) db.session.commit() send_spam_user_email(user.email, deposit=deposit, community=community) if current_app.config['ZENODO_SPAM_EMAIL_ADMINS']: send_spam_admin_email(user, deposit=deposit) if deposit: error_message = \ ('Our spam protection system has classified your upload as a ' 'potential spam attempt. As a preventive measure and due to ' 'significant increase in spam, we have therefore deactivated ' 'your user account and logged you out of Zenodo. Your upload has ' 'not been published. If you think this is a mistake, please ' 'contact our support.') if community: error_message = \ ('Our spam protection system has classified your community as a ' 'potential spam attempt. As a preventive measure and due to ' 'significant increase in spam, we have therefore deactivated ' 'your user account and logged you out of Zenodo. Your community ' 'has not been created. If you think this is a mistake, please ' 'contact our support.') flash(error_message, category='warning') abort(400, error_message) ","def default_spam_handling(deposit=None, community=None): """"""Default actions to counter spam detected record."""""" if deposit: user = User.query.get(deposit['_deposit']['owners'][0]) if community: user = community.owner community.description = '--SPAM--' + community.description if community.oaiset: db.session.delete(community.oaiset) community.delete() user.active = False delete_user_sessions(user) logout_user() db.session.add(user) db.session.commit() send_spam_user_email(user.email, deposit=deposit, community=community) if current_app.config['ZENODO_SPAM_EMAIL_ADMINS']: send_spam_admin_email(user, deposit=deposit) if deposit: error_message = \ ('Our spam protection system has classified your upload as a ' 'potential spam attempt. As a preventive measure and due to ' 'significant increase in spam, we have therefore deactivated ' 'your user account and logged you out of Zenodo. Your upload has ' 'not been published. If you think this is a mistake, please ' 'contact our support.') if community: error_message = \ ('Our spam protection system has classified your community as a ' 'potential spam attempt. As a preventive measure and due to ' 'significant increase in spam, we have therefore deactivated ' 'your user account and logged you out of Zenodo. Your community ' 'has not been created. If you think this is a mistake, please ' 'contact our support.') flash(error_message, category='warning') abort(400, error_message) " 12467,"def enum_value_callback(ctx: 'mypy.plugin.AttributeContext') -> Type: """"""This plugin refines the 'value' attribute in enums to refer to the original underlying value. For example, suppose we have the following: class SomeEnum: FOO = A() BAR = B() By default, mypy will infer that 'SomeEnum.FOO.value' and 'SomeEnum.BAR.value' both are of type 'Any'. This plugin refines this inference so that mypy understands the expressions are actually of types 'A' and 'B' respectively. This better reflects the actual runtime behavior. This plugin works simply by looking up the original value assigned to the enum. For example, when this plugin sees 'SomeEnum.BAR.value', it will look up whatever type 'BAR' had in the SomeEnum TypeInfo and use that as the inferred type of the overall expression. This plugin assumes that the provided context is an attribute access matching one of the strings found in 'ENUM_VALUE_ACCESS'. 
"""""" enum_field_name = _extract_underlying_field_name(ctx.type) if enum_field_name is None: # We do not know the ennum field name (perhaps it was passed to a function and we only # know that it _is_ a member). All is not lost however, if we can prove that the all # of the enum members have the same value-type, then it doesn't matter which member # was passed in. The value-type is still known. if isinstance(ctx.type, Instance): info = ctx.type.type stnodes = (info.get(name) for name in info.names) first_node = next(stnodes, None) if first_node is None: return ctx.default_attr_type first_node_type = first_node.type if all(node is not None and node.type == first_node_type for node in stnodes): underlying_type = get_proper_type(first_node_type) if underlying_type is not None: return underlying_type return ctx.default_attr_type assert isinstance(ctx.type, Instance) info = ctx.type.type stnode = info.get(enum_field_name) if stnode is None: return ctx.default_attr_type underlying_type = get_proper_type(stnode.type) if underlying_type is None: # TODO: Deduce the inferred type if the user omits adding their own default types. # TODO: Consider using the return type of `Enum._generate_next_value_` here? return ctx.default_attr_type if isinstance(underlying_type, Instance) and underlying_type.type.fullname == 'enum.auto': # TODO: Deduce the correct inferred type when the user uses 'enum.auto'. # We should use the same strategy we end up picking up above. return ctx.default_attr_type return underlying_type ","def enum_value_callback(ctx: 'mypy.plugin.AttributeContext') -> Type: """"""This plugin refines the 'value' attribute in enums to refer to the original underlying value. For example, suppose we have the following: class SomeEnum: FOO = A() BAR = B() By default, mypy will infer that 'SomeEnum.FOO.value' and 'SomeEnum.BAR.value' both are of type 'Any'. This plugin refines this inference so that mypy understands the expressions are actually of types 'A' and 'B' respectively. This better reflects the actual runtime behavior. This plugin works simply by looking up the original value assigned to the enum. For example, when this plugin sees 'SomeEnum.BAR.value', it will look up whatever type 'BAR' had in the SomeEnum TypeInfo and use that as the inferred type of the overall expression. This plugin assumes that the provided context is an attribute access matching one of the strings found in 'ENUM_VALUE_ACCESS'. """""" enum_field_name = _extract_underlying_field_name(ctx.type) if enum_field_name is None: # We do not know the enum field name (perhaps it was passed to a function and we only # know that it _is_ a member). All is not lost however, if we can prove that the all # of the enum members have the same value-type, then it doesn't matter which member # was passed in. The value-type is still known. 
if isinstance(ctx.type, Instance): info = ctx.type.type stnodes = (info.get(name) for name in info.names) first_node = next(stnodes, None) if first_node is None: return ctx.default_attr_type first_node_type = first_node.type if all(node is not None and node.type == first_node_type for node in stnodes): underlying_type = get_proper_type(first_node_type) if underlying_type is not None: return underlying_type return ctx.default_attr_type assert isinstance(ctx.type, Instance) info = ctx.type.type stnode = info.get(enum_field_name) if stnode is None: return ctx.default_attr_type underlying_type = get_proper_type(stnode.type) if underlying_type is None: # TODO: Deduce the inferred type if the user omits adding their own default types. # TODO: Consider using the return type of `Enum._generate_next_value_` here? return ctx.default_attr_type if isinstance(underlying_type, Instance) and underlying_type.type.fullname == 'enum.auto': # TODO: Deduce the correct inferred type when the user uses 'enum.auto'. # We should use the same strategy we end up picking up above. return ctx.default_attr_type return underlying_type " 31696,"def ioc_from_url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]: """""" Returns the results of the Parse IOCs from URL API call Args: client: IOCParser client to use args: All command arguments, ulr, limit and keys (if specified) Returns: CommandResults object containing the results of the parse from url as returned from the API and its readable output """""" url = args.get('url') keys = argToList(args.get('keys')) limit = args.get('limit') if not keys: keys = KEYS keys = list_to_upper_case(keys) if not url: raise ValueError('url not specified') response = client.ioc_from_url(url) response_data = process_response(response, keys, limit) command_results = [] outputs = {'url': url, 'Results': []} for key, values in response_data.items(): for value in values: outputs['Results'].append({'type': key, 'value': value}) for ioc_type, iocs in response_data.items(): command_results.append(CommandResults( readable_output=tableToMarkdown(f'results for {ioc_type} from {url}', iocs, headers=ioc_type), outputs_prefix=f'IOCParser.parseFromUrl', outputs=outputs )) command_results.append(CommandResults( raw_response=response_data )) return command_results ","def ioc_from_url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]: """""" Returns the results of the Parse IOCs from URL API call Args: client: IOCParser client to use args: All command arguments, ulr, limit and keys (if specified) Returns: CommandResults object containing the results of the parse from url as returned from the API and its readable output """""" url = args.get('url') keys = argToList(args.get('keys')) or KEYS limit = args.get('limit') if not keys: keys = KEYS keys = list_to_upper_case(keys) if not url: raise ValueError('url not specified') response = client.ioc_from_url(url) response_data = process_response(response, keys, limit) command_results = [] outputs = {'url': url, 'Results': []} for key, values in response_data.items(): for value in values: outputs['Results'].append({'type': key, 'value': value}) for ioc_type, iocs in response_data.items(): command_results.append(CommandResults( readable_output=tableToMarkdown(f'results for {ioc_type} from {url}', iocs, headers=ioc_type), outputs_prefix=f'IOCParser.parseFromUrl', outputs=outputs )) command_results.append(CommandResults( raw_response=response_data )) return command_results " 14157,"def _pointer_str(obj): """""" Get the memory 
address of obj as used in object.__repr__. This is equivalent to ``sprintf(""%p"", id(obj))``, but python does not support ``%p``. """""" full_repr = object.__repr__(obj) # gives ""<{type} object at {address}>"" return full_repr.rsplit(' ', 1)[1][:-1] ","def _pointer_str(obj): """""" Get the memory address of *obj* as used in ``object.__repr__``. This is equivalent to ``sprintf(""%p"", id(obj))``, but python does not support ``%p``. """""" full_repr = object.__repr__(obj) # gives ""<{type} object at {address}>"" return full_repr.rsplit(' ', 1)[1][:-1] " 24635,"def find_ion_saturation_current( voltage: np.ndarray, current: np.ndarray, *, fit_type: str = ""exp_plus_linear"", current_bound: float = None, voltage_bound: float = None, ) -> Tuple[ffuncs.Linear, ISatExtras]: """""" Determines the ion-saturation current (:math:`I_{sat}`) for a given current-voltage (IV) curve obtained from a swept Langmuir probe. The current collected by a Langmuir probe reaches ion-saturation when the probe is sufficiently biased so the influx of electrons is completely repelled leading to only the collection of ions. (For additional details see the **Notes** section below.) **Aliases:** `find_isat_` Parameters ---------- voltage: `numpy.ndarray` 1-D numpy array of monotonically increasing probe biases (should be in volts). current: `numpy.ndarray` 1-D numpy array of probe current (should be in amperes) corresponding to the ``voltage`` array. fit_type: `str` The type of curve (:term:`fit-function`) to be fitted to the Langmuir trace, valid options are listed below. (DEFAULT ``""exp_plus_linear""``) +-----------------------+----------------------------------------------------------+ | ``""linear""`` | `~plasmapy.analysis.fit_functions.Linear` | +-----------------------+----------------------------------------------------------+ | ``""exponential""`` | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` | +-----------------------+----------------------------------------------------------+ | ``""exp_plus_linear""`` | `~plasmapy.analysis.fit_functions.ExponentialPlusLinear` | +-----------------------+----------------------------------------------------------+ current_bound: `float` A fraction representing a percentile window around the minimum current for which to collect the points. For example, a value of ``0.1`` indicates to use all points within 10% of the minimum current. (DEFAULT ``None``) | If neither ``current_bound`` or ``voltage_bound`` are specified, then the routine will collect indices based on an internal ``current_bound`` setting for the specified ``fit_type``. +-----------------------+--------------------------------------+ | ``""linear""`` | 0.4 | +-----------------------+--------------------------------------+ | ``""exponential""`` | 1.0 | +-----------------------+--------------------------------------+ | ``""exp_plus_linear""`` | 1.0 | +-----------------------+--------------------------------------+ voltage_bound: `float` A bias voltage (in volts) that specifies an upper bound used to collect the points for the curve fit. That is, points that satisfy ``voltage <= voltage_bound`` are used in the fit. (DEFAULT ``None``) Returns ------- isat: `~plasmapy.analysis.fit_functions.Linear` A :term:`fit-function` representing the linear portion of the fitter curve. 
extras: `ISatExtras` Additional information from the curve fit: * ``extras.fitted_func`` is the :term:`fit-function` (specified by ``fit_type``) fitted to the IV-curve * ``extras.rsq`` is the coefficient of determination (r-squared) value of the ``extras.fitted_func`` to the IV-curve * ``extras.fitted_indices`` is a `slice` object representing the points used in the curve fit (i.e. ``(voltage[extras.fitted_indices], current[extras.fitted_indices])``). Notes ----- This routine works by: 1. Selecting the points to be used in the fit as determined by ``voltage_bound`` or ``current_bound``. 2. Fitting the selected points with the :term:`fit-function` specified by ``fit_type``. 3. Extracting the linear component of the fit and returning that as the ion-saturation current. This routine opts to return a function representing a linear ion-saturation current, since, while ideal planar Langmuir probes reach a steady-state ion-saturation current, real world Langmuir probes ""suffer"" from expanding sheaths as the bias voltage increases. This sheath expansion results the ion-saturation current also increasing. """""" rtn_extras = ISatExtras(rsq=None, fitted_func=None, fitted_indices=None)._asdict() _settings = { ""linear"": { ""func"": ffuncs.Linear, ""current_bound"": 0.4, }, ""exp_plus_linear"": { ""func"": ffuncs.ExponentialPlusLinear, ""current_bound"": 1.0, }, ""exp_plus_offset"": { ""func"": ffuncs.ExponentialPlusOffset, ""current_bound"": 1.0, }, } try: default_current_bound = _settings[fit_type][""current_bound""] fit_func = _settings[fit_type][""func""]() rtn_extras[""fitted_func""] = fit_func except KeyError: raise ValueError( f""Requested fit '{fit_type}' is not a valid option. Valid options "" f""are {list(_settings.keys())}."" ) # check voltage and current arrays voltage, current = check_sweep(voltage, current, strip_units=True) # condition kwargs voltage_bound and current_bound if voltage_bound is None and current_bound is None: current_bound = default_current_bound elif voltage_bound is not None and current_bound is not None: raise ValueError( ""Both keywords 'current_bound' and `voltage_bound' are specified, "" ""use only one."" ) if current_bound is not None: if not isinstance(current_bound, numbers.Real): raise TypeError( f""Keyword 'current_bound' is of type {type(current_bound)}, "" f""expected an int or float."" ) current_min = current.min() current_bound = (1.0 - current_bound) * current_min mask = np.where(current <= current_bound)[0] else: # voltage_bound is not None if not isinstance(voltage_bound, numbers.Real): raise TypeError( f""Keyword 'voltage_bound' is of type {type(voltage_bound)}, "" f""expected an int or float."" ) mask = np.where(voltage <= voltage_bound)[0] if mask.size == 0: raise ValueError( f""The specified bounding keywords, 'voltage_bound' "" f""({voltage_bound}) and 'current_bound' ({current_bound}), "" f""resulted in a fit window containing no points."" ) mask = slice(0, mask[-1] + 1) rtn_extras[""fitted_indices""] = mask volt_sub = voltage[mask] curr_sub = current[mask] fit_func.curve_fit(volt_sub, curr_sub) rtn_extras[""rsq""] = fit_func.rsq m = getattr(fit_func.params, ""m"", 0.0) b = getattr(fit_func.params, ""b"", 0.0) m_err = getattr(fit_func.param_errors, ""m"", 0.0) b_err = getattr(fit_func.param_errors, ""b"", 0.0) isat = ffuncs.Linear(params=(m, b), param_errors=(m_err, b_err)) return isat, ISatExtras(**rtn_extras) ","def find_ion_saturation_current( voltage: np.ndarray, current: np.ndarray, *, fit_type: str = ""exp_plus_linear"", current_bound: 
float = None, voltage_bound: float = None, ) -> Tuple[ffuncs.Linear, ISatExtras]: """""" Determines the ion-saturation current (:math:`I_{sat}`) for a given current-voltage (IV) curve obtained from a swept Langmuir probe. The current collected by a Langmuir probe reaches ion-saturation when the probe is sufficiently biased so the influx of electrons is completely repelled leading to only the collection of ions. (For additional details see the **Notes** section below.) **Aliases:** `find_isat_` Parameters ---------- voltage: `numpy.ndarray` 1-D numpy array of monotonically increasing probe biases (should be in volts). current: `numpy.ndarray` 1-D numpy array of probe current (should be in amperes) corresponding to the ``voltage`` array. fit_type: `str` The type of curve (:term:`fit-function`) to be fitted to the Langmuir trace, valid options are listed below. Defaults to ``""exp_plus_linear""``. +-----------------------+----------------------------------------------------------+ | ``""linear""`` | `~plasmapy.analysis.fit_functions.Linear` | +-----------------------+----------------------------------------------------------+ | ``""exponential""`` | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` | +-----------------------+----------------------------------------------------------+ | ``""exp_plus_linear""`` | `~plasmapy.analysis.fit_functions.ExponentialPlusLinear` | +-----------------------+----------------------------------------------------------+ current_bound: `float` A fraction representing a percentile window around the minimum current for which to collect the points. For example, a value of ``0.1`` indicates to use all points within 10% of the minimum current. (DEFAULT ``None``) | If neither ``current_bound`` or ``voltage_bound`` are specified, then the routine will collect indices based on an internal ``current_bound`` setting for the specified ``fit_type``. +-----------------------+--------------------------------------+ | ``""linear""`` | 0.4 | +-----------------------+--------------------------------------+ | ``""exponential""`` | 1.0 | +-----------------------+--------------------------------------+ | ``""exp_plus_linear""`` | 1.0 | +-----------------------+--------------------------------------+ voltage_bound: `float` A bias voltage (in volts) that specifies an upper bound used to collect the points for the curve fit. That is, points that satisfy ``voltage <= voltage_bound`` are used in the fit. (DEFAULT ``None``) Returns ------- isat: `~plasmapy.analysis.fit_functions.Linear` A :term:`fit-function` representing the linear portion of the fitter curve. extras: `ISatExtras` Additional information from the curve fit: * ``extras.fitted_func`` is the :term:`fit-function` (specified by ``fit_type``) fitted to the IV-curve * ``extras.rsq`` is the coefficient of determination (r-squared) value of the ``extras.fitted_func`` to the IV-curve * ``extras.fitted_indices`` is a `slice` object representing the points used in the curve fit (i.e. ``(voltage[extras.fitted_indices], current[extras.fitted_indices])``). Notes ----- This routine works by: 1. Selecting the points to be used in the fit as determined by ``voltage_bound`` or ``current_bound``. 2. Fitting the selected points with the :term:`fit-function` specified by ``fit_type``. 3. Extracting the linear component of the fit and returning that as the ion-saturation current. 
This routine opts to return a function representing a linear ion-saturation current, since, while ideal planar Langmuir probes reach a steady-state ion-saturation current, real world Langmuir probes ""suffer"" from expanding sheaths as the bias voltage increases. This sheath expansion results the ion-saturation current also increasing. """""" rtn_extras = ISatExtras(rsq=None, fitted_func=None, fitted_indices=None)._asdict() _settings = { ""linear"": { ""func"": ffuncs.Linear, ""current_bound"": 0.4, }, ""exp_plus_linear"": { ""func"": ffuncs.ExponentialPlusLinear, ""current_bound"": 1.0, }, ""exp_plus_offset"": { ""func"": ffuncs.ExponentialPlusOffset, ""current_bound"": 1.0, }, } try: default_current_bound = _settings[fit_type][""current_bound""] fit_func = _settings[fit_type][""func""]() rtn_extras[""fitted_func""] = fit_func except KeyError: raise ValueError( f""Requested fit '{fit_type}' is not a valid option. Valid options "" f""are {list(_settings.keys())}."" ) # check voltage and current arrays voltage, current = check_sweep(voltage, current, strip_units=True) # condition kwargs voltage_bound and current_bound if voltage_bound is None and current_bound is None: current_bound = default_current_bound elif voltage_bound is not None and current_bound is not None: raise ValueError( ""Both keywords 'current_bound' and `voltage_bound' are specified, "" ""use only one."" ) if current_bound is not None: if not isinstance(current_bound, numbers.Real): raise TypeError( f""Keyword 'current_bound' is of type {type(current_bound)}, "" f""expected an int or float."" ) current_min = current.min() current_bound = (1.0 - current_bound) * current_min mask = np.where(current <= current_bound)[0] else: # voltage_bound is not None if not isinstance(voltage_bound, numbers.Real): raise TypeError( f""Keyword 'voltage_bound' is of type {type(voltage_bound)}, "" f""expected an int or float."" ) mask = np.where(voltage <= voltage_bound)[0] if mask.size == 0: raise ValueError( f""The specified bounding keywords, 'voltage_bound' "" f""({voltage_bound}) and 'current_bound' ({current_bound}), "" f""resulted in a fit window containing no points."" ) mask = slice(0, mask[-1] + 1) rtn_extras[""fitted_indices""] = mask volt_sub = voltage[mask] curr_sub = current[mask] fit_func.curve_fit(volt_sub, curr_sub) rtn_extras[""rsq""] = fit_func.rsq m = getattr(fit_func.params, ""m"", 0.0) b = getattr(fit_func.params, ""b"", 0.0) m_err = getattr(fit_func.param_errors, ""m"", 0.0) b_err = getattr(fit_func.param_errors, ""b"", 0.0) isat = ffuncs.Linear(params=(m, b), param_errors=(m_err, b_err)) return isat, ISatExtras(**rtn_extras) " 12181,"def test_long_error_stack(): # clear out jobs table schema.schema.jobs.delete() # create long error stack STACK_SIZE = 89942 # Does not fit into small blob (should be 64k, but found to be higher) long_error_stack = ''.join(random.choice(string.ascii_letters) for _ in range(STACK_SIZE)) assert_true(subjects) table_name = 'fake_table' key = subjects.fetch('KEY')[0] # test long error stack schema.schema.jobs.reserve(table_name, key) schema.schema.jobs.error(table_name, key, 'error message', long_error_stack) error_stack = schema.schema.jobs.fetch1('error_stack') assert_true(error_stack == long_error_stack, 'error stacks do not agree') schema.schema.jobs.delete() ","def test_long_error_stack(): # clear out jobs table schema.schema.jobs.delete() # create long error stack STACK_SIZE = 89942 # Does not fit into small blob (should be 64k, but found to be higher) long_error_stack = 
''.join(random.choice(string.ascii_letters) for _ in range(STACK_SIZE)) assert_true(subjects) table_name = 'fake_table' key = subjects.fetch('KEY')[0] # test long error stack schema.schema.jobs.reserve(table_name, key) schema.schema.jobs.error(table_name, key, 'error message', long_error_stack) error_stack = schema.schema.jobs.fetch1('error_stack') assert error_stack == long_error_stack, 'error stacks do not agree' schema.schema.jobs.delete() " 45977,"def _get_convex_edges(polygon: torch.Tensor, h: int, w: int): r""""""Gets the left and right edges of a polygon for each y-coordinate y \in [0, h) Args: polygon: represents polygons to draw in BxNx2 N is the number of points 2 is (x, y). h: bottom most coordinate (top coordinate is assumed to be 0) w: right most coordinate (left coordinate is assumed to be 0) Returns: The left and right edges of the polygon of shape Bx2. """""" if not torch.allclose(polygon[..., -1, :], polygon[..., 0, :]): polygon = torch.cat((polygon, polygon[..., :1, :]), dim=-2) x_start = polygon[..., :-1, 0] x_end = polygon[..., 1:, 0] y_start = polygon[..., :-1, 1] y_end = polygon[..., 1:, 1] ys = torch.arange(h, device=polygon.device) dx = torch.clamp((x_end - x_start) / (y_end - y_start), -w, w) xs = (ys[..., :, None] - y_start[..., None, :]) * dx[..., None, :] + x_start[..., None, :] valid_candidates = (y_start[..., None, :] <= ys[..., :, None]) & (ys[..., :, None] <= y_end[..., None, :]) valid_candidates |= (y_start[..., None, :] >= ys[..., :, None]) & (ys[..., :, None] >= y_end[..., None, :]) x_left_cand = xs.clone() x_left_cand[~valid_candidates] = w x_right_cand = xs.clone() x_right_cand[~valid_candidates] = -1 x_left = x_left_cand.min(dim=-1).values x_right = x_right_cand.max(dim=-1).values return x_left, x_right ","def _get_convex_edges(polygon: torch.Tensor, h: int, w: int): r""""""Gets the left and right edges of a polygon for each y-coordinate y \in [0, h) Args: polygon: represents polygons to draw in BxNx2 N is the number of points 2 is (x, y). h: bottom most coordinate (top coordinate is assumed to be 0) w: right most coordinate (left coordinate is assumed to be 0) Returns: The left and right edges of the polygon of shape Bx2. """""" if not torch.allclose(polygon[..., -1, :], polygon[..., 0, :]): polygon = torch.cat((polygon, polygon[..., :1, :]), dim=-2) x_start = polygon[..., :-1, 0] x_end = polygon[..., 1:, 0] y_start = polygon[..., :-1, 1] y_end = polygon[..., 1:, 1] ys = torch.arange(h, device=polygon.device) dx = ((x_end - x_start) / (y_end - y_start)).clamp(min=-w, max=w) xs = (ys[..., :, None] - y_start[..., None, :]) * dx[..., None, :] + x_start[..., None, :] valid_candidates = (y_start[..., None, :] <= ys[..., :, None]) & (ys[..., :, None] <= y_end[..., None, :]) valid_candidates |= (y_start[..., None, :] >= ys[..., :, None]) & (ys[..., :, None] >= y_end[..., None, :]) x_left_cand = xs.clone() x_left_cand[~valid_candidates] = w x_right_cand = xs.clone() x_right_cand[~valid_candidates] = -1 x_left = x_left_cand.min(dim=-1).values x_right = x_right_cand.max(dim=-1).values return x_left, x_right " 6746,"def initializeEnvironment(count: int, pid: object) -> Dict[str, str]: """""" Create a copy of the process environment and add I{LISTEN_FDS} and I{LISTEN_PID} (the environment variables set by systemd) to it. 
"""""" result = os.environ.copy() result[""LISTEN_FDS""] = str(count) result[""LISTEN_FDNAMES""] = "":"".join([f""{n}.socket"" for n in range(count)]) result[""LISTEN_PID""] = str(pid) return result ","def getEnvironment(count: int, pid: object) -> Dict[str, str]: """""" Create a copy of the process environment and add I{LISTEN_FDS} and I{LISTEN_PID} (the environment variables set by systemd) to it. """""" result = os.environ.copy() result[""LISTEN_FDS""] = str(count) result[""LISTEN_FDNAMES""] = "":"".join([f""{n}.socket"" for n in range(count)]) result[""LISTEN_PID""] = str(pid) return result " 59542,"def marginal_distribution( counts: dict, indices: Optional[List[int]] = None, format_marginal: bool = False ) -> Dict[str, int]: """"""Marginalize counts from an experiment over some indices of interest. Unlike :func:`~.marginal_counts` this function respects the order of the input ``indices``. If the input ``indices`` list is specified the order the bit indices are specified will be the output order of the bitstrings in the marginalized output. Args: counts: result to be marginalized indices: The bit positions of interest to marginalize over. If ``None`` (default), do not marginalize at all. format_marginal: Default: False. If True, takes the output of marginalize and formats it with placeholders between cregs and for non-indices. Returns: dict(str, int): A marginalized dictionary Raises: QiskitError: If any value in ``indices`` is invalid or the ``counts`` dict is invalid. """""" num_clbits = len(max(counts.keys()).replace("" "", """")) if indices is not None and (not indices or not set(indices).issubset(range(num_clbits))): raise QiskitError(f""indices must be in range [0, {num_clbits - 1}]."") if isinstance(counts, Counts): res = results_rs.marginal_counts(counts, indices) elif isinstance(counts, (ProbDistribution, QuasiDistribution)): res = results_rs.marginal_distribution(counts, indices) else: first_value = next(iter(counts.values())) if isinstance(first_value, int): res = results_rs.marginal_counts(counts, indices) elif isinstance(first_value, float): res = results_rs.marginal_distribution(counts, indices) else: raise QiskitError(""Values of counts must be an int or float"") if format_marginal and indices is not None: return _format_marginal(counts, res, indices) return res ","def marginal_distribution( counts: dict, indices: Optional[List[int]] = None, format_marginal: bool = False ) -> Dict[str, int]: """"""Marginalize counts from an experiment over some indices of interest. Unlike :func:`~.marginal_counts` this function respects the order of the input ``indices``. If the input ``indices`` list is specified, the order the bit indices will be the output order of the bitstrings in the marginalized output. Args: counts: result to be marginalized indices: The bit positions of interest to marginalize over. If ``None`` (default), do not marginalize at all. format_marginal: Default: False. If True, takes the output of marginalize and formats it with placeholders between cregs and for non-indices. Returns: dict(str, int): A marginalized dictionary Raises: QiskitError: If any value in ``indices`` is invalid or the ``counts`` dict is invalid. 
"""""" num_clbits = len(max(counts.keys()).replace("" "", """")) if indices is not None and (not indices or not set(indices).issubset(range(num_clbits))): raise QiskitError(f""indices must be in range [0, {num_clbits - 1}]."") if isinstance(counts, Counts): res = results_rs.marginal_counts(counts, indices) elif isinstance(counts, (ProbDistribution, QuasiDistribution)): res = results_rs.marginal_distribution(counts, indices) else: first_value = next(iter(counts.values())) if isinstance(first_value, int): res = results_rs.marginal_counts(counts, indices) elif isinstance(first_value, float): res = results_rs.marginal_distribution(counts, indices) else: raise QiskitError(""Values of counts must be an int or float"") if format_marginal and indices is not None: return _format_marginal(counts, res, indices) return res " 43930,"def _boys(n, t): r""""""Evaluate Boys function. The :math:`n`-th order `Boys function `_ is defined as .. math:: F_n(t) = \int_{0}^{1}x^{2n} e^{-tx^2}dx. The Boys function is related to the lower incomplete Gamma `function `_, :math:`\gamma`, as .. math:: F_n(t) = \frac{1}{2t^{n + 0.5}} \gamma(n + 0.5, t), where .. math:: \gamma(m, t) = \int_{0}^{t} x^{m-1} e^{-x} dx. Args: n (float): order of the Boys function t (float): exponent of the Boys function Returns: float: magnitude of the Boys function """""" if t == 0.0: return 1 / (2 * n + 1) return asp.special.gammainc(n + 0.5, t) * asp.special.gamma(n + 0.5) / (2 * t ** (n + 0.5)) ","def _boys(n, t): r""""""Evaluate Boys function. The :math:`n`-th order `Boys function `_ is defined as .. math:: F_n(t) = \int_{0}^{1}x^{2n} e^{-tx^2}dx. The Boys function is related to the lower incomplete Gamma `function `_, :math:`\gamma`, as .. math:: F_n(t) = \frac{1}{2t^{n + 0.5}} \gamma(n + 0.5, t), where .. math:: \gamma(m, t) = \int_{0}^{t} x^{m-1} e^{-x} dx. Args: n (float): order of the Boys function t (float): exponent of the Boys function Returns: float: value of the Boys function """""" if t == 0.0: return 1 / (2 * n + 1) return asp.special.gammainc(n + 0.5, t) * asp.special.gamma(n + 0.5) / (2 * t ** (n + 0.5)) " 21932,"def forced_response(sys, T=None, U=0., X0=0., transpose=False, interpolate=False, squeeze=True): """"""Simulate the output of a linear system. As a convenience for parameters `U`, `X0`: Numbers (scalars) are converted to constant arrays with the correct shape. The correct shape is inferred from arguments `sys` and `T`. For information on the **shape** of parameters `U`, `T`, `X0` and return values `T`, `yout`, `xout`, see :ref:`time-series-convention`. Parameters ---------- sys: LTI (StateSpace, or TransferFunction) LTI system to simulate T: array-like, optional for discrete LTI `sys` Time steps at which the input is defined; values must be evenly spaced. U: array-like or number, optional Input array giving input at each time `T` (default = 0). If `U` is ``None`` or ``0``, a special algorithm is used. This special algorithm is faster than the general algorithm, which is used otherwise. X0: array-like or number, optional Initial condition (default = 0). transpose: bool, optional (default=False) If True, transpose all input and output arrays (for backward compatibility with MATLAB and scipy.signal.lsim) interpolate: bool, optional (default=False) If True and system is a discrete time system, the input will be interpolated between the given time steps and the output will be given at system sampling rate. Otherwise, only return the output at the times given in `T`. 
No effect on continuous time simulations (default = False). squeeze: bool, optional (default=True) If True, remove single-dimensional entries from the shape of the output. For single output systems, this converts the output response to a 1D array. Returns ------- T: array Time values of the output. yout: array Response of the system. xout: array Time evolution of the state vector. See Also -------- step_response, initial_response, impulse_response Notes ----- For discrete time systems, the input/output response is computed using the scipy.signal.dlsim function. For continuous time systems, the output is computed using the matrix exponential `exp(A t)` and assuming linear interpolation of the inputs between time points. Examples -------- >>> T, yout, xout = forced_response(sys, T, u, X0) See :ref:`time-series-convention`. """""" if not isinstance(sys, LTI): raise TypeError('Parameter ``sys``: must be a ``LTI`` object. ' '(For example ``StateSpace`` or ``TransferFunction``)') sys = _convertToStateSpace(sys) A, B, C, D = np.asarray(sys.A), np.asarray(sys.B), np.asarray(sys.C), \ np.asarray(sys.D) # d_type = A.dtype n_states = A.shape[0] n_inputs = B.shape[1] n_outputs = C.shape[0] # Convert inputs to numpy arrays for easier shape checking if U is not None: U = np.asarray(U) if T is not None: T = np.asarray(T) # Set and/or check time vector in discrete time case if isdtime(sys, strict=True): if T is None: if U is None: raise ValueError('Parameters ``T`` and ``U`` can\'t both be' 'zero for discrete-time simulation') # Set T to equally spaced samples with same length as U if U.ndim == 1: n_steps = U.shape[0] else: n_steps = U.shape[1] T = np.array(range(n_steps)) * (1 if sys.dt is True else sys.dt) else: # Make sure the input vector and time vector have same length # TODO: allow interpolation of the input vector if (U.ndim == 1 and U.shape[0] != T.shape[0]) or \ (U.ndim > 1 and U.shape[1] != T.shape[0]): ValueError('Pamameter ``T`` must have same elements as' ' the number of columns in input array ``U``') # Test if T has shape (n,) or (1, n); # T must be array-like and values must be increasing. # The length of T determines the length of the input vector. if T is None: raise ValueError('Parameter ``T``: must be array-like, and contain ' '(strictly monotonic) increasing numbers.') T = _check_convert_array(T, [('any',), (1, 'any')], 'Parameter ``T``: ', squeeze=True, transpose=transpose) dt = T[1] - T[0] if not np.allclose(T[1:] - T[:-1], dt): raise ValueError(""Parameter ``T``: time values must be "" ""equally spaced."") n_steps = T.shape[0] # number of simulation steps # create X0 if not given, test if X0 has correct shape X0 = _check_convert_array(X0, [(n_states,), (n_states, 1)], 'Parameter ``X0``: ', squeeze=True) xout = np.zeros((n_states, n_steps)) xout[:, 0] = X0 yout = np.zeros((n_outputs, n_steps)) # Separate out the discrete and continuous time cases if isctime(sys): # Solve the differential equation, copied from scipy.signal.ltisys. 
dot, squeeze, = np.dot, np.squeeze # Faster and shorter code # Faster algorithm if U is zero if U is None or (isinstance(U, (int, float)) and U == 0): # Solve using matrix exponential expAdt = sp.linalg.expm(A * dt) for i in range(1, n_steps): xout[:, i] = dot(expAdt, xout[:, i-1]) yout = dot(C, xout) # General algorithm that interpolates U in between output points else: # Test if U has correct shape and type legal_shapes = [(n_steps,), (1, n_steps)] if n_inputs == 1 else \ [(n_inputs, n_steps)] U = _check_convert_array(U, legal_shapes, 'Parameter ``U``: ', squeeze=False, transpose=transpose) # convert 1D array to 2D array with only one row if len(U.shape) == 1: U = U.reshape(1, -1) # pylint: disable=E1103 # Algorithm: to integrate from time 0 to time dt, with linear # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve # xdot = A x + B u, x(0) = x0 # udot = (u1 - u0) / dt, u(0) = u0. # # Solution is # [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ] # [ u(dt) ] = exp [ 0 0 I ] [ u0 ] # [u1 - u0] [ 0 0 0 ] [u1 - u0] M = np.block([[A * dt, B * dt, np.zeros((n_states, n_inputs))], [np.zeros((n_inputs, n_states + n_inputs)), np.identity(n_inputs)], [np.zeros((n_inputs, n_states + 2 * n_inputs))]]) expM = sp.linalg.expm(M) Ad = expM[:n_states, :n_states] Bd1 = expM[:n_states, n_states+n_inputs:] Bd0 = expM[:n_states, n_states:n_states + n_inputs] - Bd1 for i in range(1, n_steps): xout[:, i] = (dot(Ad, xout[:, i-1]) + dot(Bd0, U[:, i-1]) + dot(Bd1, U[:, i])) yout = dot(C, xout) + dot(D, U) tout = T else: # Discrete type system => use SciPy signal processing toolbox if sys.dt is not True: # Make sure that the time increment is a multiple of sampling time # First make sure that time increment is bigger than sampling time # (with allowance for small precision errors) if dt < sys.dt and not np.isclose(dt, sys.dt): raise ValueError(""Time steps ``T`` must match sampling time"") # Now check to make sure it is a multiple (with check against # sys.dt because floating point mod can have small errors elif not (np.isclose(dt % sys.dt, 0) or np.isclose(dt % sys.dt, sys.dt)): raise ValueError(""Time steps ``T`` must be multiples of "" ""sampling time"") sys_dt = sys.dt else: sys_dt = dt # For unspecified sampling time, use time incr # Discrete time simulation using signal processing toolbox dsys = (A, B, C, D, sys_dt) # Use signal processing toolbox for the discrete time simulation # Transpose the input to match toolbox convention tout, yout, xout = sp.signal.dlsim(dsys, np.transpose(U), T, X0) if not interpolate: # If dt is different from sys.dt, resample the output inc = int(round(dt / sys_dt)) tout = T # Return exact list of time steps yout = yout[::inc, :] xout = xout[::inc, :] # Transpose the output and state vectors to match local convention xout = sp.transpose(xout) yout = sp.transpose(yout) # Get rid of unneeded dimensions if squeeze: yout = np.squeeze(yout) xout = np.squeeze(xout) # See if we need to transpose the data back into MATLAB form if transpose: tout = np.transpose(tout) yout = np.transpose(yout) xout = np.transpose(xout) return tout, yout, xout ","def forced_response(sys, T=None, U=0., X0=0., transpose=False, interpolate=False, squeeze=True): """"""Simulate the output of a linear system. As a convenience for parameters `U`, `X0`: Numbers (scalars) are converted to constant arrays with the correct shape. The correct shape is inferred from arguments `sys` and `T`. 
For information on the **shape** of parameters `U`, `T`, `X0` and return values `T`, `yout`, `xout`, see :ref:`time-series-convention`. Parameters ---------- sys: LTI (StateSpace, or TransferFunction) LTI system to simulate T: array-like, optional for discrete LTI `sys` Time steps at which the input is defined; values must be evenly spaced. U: array-like or number, optional Input array giving input at each time `T` (default = 0). If `U` is ``None`` or ``0``, a special algorithm is used. This special algorithm is faster than the general algorithm, which is used otherwise. X0: array-like or number, optional Initial condition (default = 0). transpose: bool, optional (default=False) If True, transpose all input and output arrays (for backward compatibility with MATLAB and scipy.signal.lsim) interpolate: bool, optional (default=False) If True and system is a discrete time system, the input will be interpolated between the given time steps and the output will be given at system sampling rate. Otherwise, only return the output at the times given in `T`. No effect on continuous time simulations (default = False). squeeze: bool, optional (default=True) If True, remove single-dimensional entries from the shape of the output. For single output systems, this converts the output response to a 1D array. Returns ------- T: array Time values of the output. yout: array Response of the system. xout: array Time evolution of the state vector. See Also -------- step_response, initial_response, impulse_response Notes ----- For discrete time systems, the input/output response is computed using the :func:`scipy.signal.dlsim` function. For continuous time systems, the output is computed using the matrix exponential `exp(A t)` and assuming linear interpolation of the inputs between time points. Examples -------- >>> T, yout, xout = forced_response(sys, T, u, X0) See :ref:`time-series-convention`. """""" if not isinstance(sys, LTI): raise TypeError('Parameter ``sys``: must be a ``LTI`` object. ' '(For example ``StateSpace`` or ``TransferFunction``)') sys = _convertToStateSpace(sys) A, B, C, D = np.asarray(sys.A), np.asarray(sys.B), np.asarray(sys.C), \ np.asarray(sys.D) # d_type = A.dtype n_states = A.shape[0] n_inputs = B.shape[1] n_outputs = C.shape[0] # Convert inputs to numpy arrays for easier shape checking if U is not None: U = np.asarray(U) if T is not None: T = np.asarray(T) # Set and/or check time vector in discrete time case if isdtime(sys, strict=True): if T is None: if U is None: raise ValueError('Parameters ``T`` and ``U`` can\'t both be' 'zero for discrete-time simulation') # Set T to equally spaced samples with same length as U if U.ndim == 1: n_steps = U.shape[0] else: n_steps = U.shape[1] T = np.array(range(n_steps)) * (1 if sys.dt is True else sys.dt) else: # Make sure the input vector and time vector have same length # TODO: allow interpolation of the input vector if (U.ndim == 1 and U.shape[0] != T.shape[0]) or \ (U.ndim > 1 and U.shape[1] != T.shape[0]): ValueError('Pamameter ``T`` must have same elements as' ' the number of columns in input array ``U``') # Test if T has shape (n,) or (1, n); # T must be array-like and values must be increasing. # The length of T determines the length of the input vector. 
if T is None: raise ValueError('Parameter ``T``: must be array-like, and contain ' '(strictly monotonic) increasing numbers.') T = _check_convert_array(T, [('any',), (1, 'any')], 'Parameter ``T``: ', squeeze=True, transpose=transpose) dt = T[1] - T[0] if not np.allclose(T[1:] - T[:-1], dt): raise ValueError(""Parameter ``T``: time values must be "" ""equally spaced."") n_steps = T.shape[0] # number of simulation steps # create X0 if not given, test if X0 has correct shape X0 = _check_convert_array(X0, [(n_states,), (n_states, 1)], 'Parameter ``X0``: ', squeeze=True) xout = np.zeros((n_states, n_steps)) xout[:, 0] = X0 yout = np.zeros((n_outputs, n_steps)) # Separate out the discrete and continuous time cases if isctime(sys): # Solve the differential equation, copied from scipy.signal.ltisys. dot, squeeze, = np.dot, np.squeeze # Faster and shorter code # Faster algorithm if U is zero if U is None or (isinstance(U, (int, float)) and U == 0): # Solve using matrix exponential expAdt = sp.linalg.expm(A * dt) for i in range(1, n_steps): xout[:, i] = dot(expAdt, xout[:, i-1]) yout = dot(C, xout) # General algorithm that interpolates U in between output points else: # Test if U has correct shape and type legal_shapes = [(n_steps,), (1, n_steps)] if n_inputs == 1 else \ [(n_inputs, n_steps)] U = _check_convert_array(U, legal_shapes, 'Parameter ``U``: ', squeeze=False, transpose=transpose) # convert 1D array to 2D array with only one row if len(U.shape) == 1: U = U.reshape(1, -1) # pylint: disable=E1103 # Algorithm: to integrate from time 0 to time dt, with linear # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve # xdot = A x + B u, x(0) = x0 # udot = (u1 - u0) / dt, u(0) = u0. # # Solution is # [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ] # [ u(dt) ] = exp [ 0 0 I ] [ u0 ] # [u1 - u0] [ 0 0 0 ] [u1 - u0] M = np.block([[A * dt, B * dt, np.zeros((n_states, n_inputs))], [np.zeros((n_inputs, n_states + n_inputs)), np.identity(n_inputs)], [np.zeros((n_inputs, n_states + 2 * n_inputs))]]) expM = sp.linalg.expm(M) Ad = expM[:n_states, :n_states] Bd1 = expM[:n_states, n_states+n_inputs:] Bd0 = expM[:n_states, n_states:n_states + n_inputs] - Bd1 for i in range(1, n_steps): xout[:, i] = (dot(Ad, xout[:, i-1]) + dot(Bd0, U[:, i-1]) + dot(Bd1, U[:, i])) yout = dot(C, xout) + dot(D, U) tout = T else: # Discrete type system => use SciPy signal processing toolbox if sys.dt is not True: # Make sure that the time increment is a multiple of sampling time # First make sure that time increment is bigger than sampling time # (with allowance for small precision errors) if dt < sys.dt and not np.isclose(dt, sys.dt): raise ValueError(""Time steps ``T`` must match sampling time"") # Now check to make sure it is a multiple (with check against # sys.dt because floating point mod can have small errors elif not (np.isclose(dt % sys.dt, 0) or np.isclose(dt % sys.dt, sys.dt)): raise ValueError(""Time steps ``T`` must be multiples of "" ""sampling time"") sys_dt = sys.dt else: sys_dt = dt # For unspecified sampling time, use time incr # Discrete time simulation using signal processing toolbox dsys = (A, B, C, D, sys_dt) # Use signal processing toolbox for the discrete time simulation # Transpose the input to match toolbox convention tout, yout, xout = sp.signal.dlsim(dsys, np.transpose(U), T, X0) if not interpolate: # If dt is different from sys.dt, resample the output inc = int(round(dt / sys_dt)) tout = T # Return exact list of time steps yout = yout[::inc, :] xout = xout[::inc, :] # Transpose the output and state vectors 
to match local convention xout = sp.transpose(xout) yout = sp.transpose(yout) # Get rid of unneeded dimensions if squeeze: yout = np.squeeze(yout) xout = np.squeeze(xout) # See if we need to transpose the data back into MATLAB form if transpose: tout = np.transpose(tout) yout = np.transpose(yout) xout = np.transpose(xout) return tout, yout, xout " 35924,"def test_base_fee(): computation = run_general_computation(LondonVM) computation.opcodes[opcode_values.BASEFEE](computation) result = computation.stack_pop1_int() assert result == 10 ** 9 # 1 gwei ","def test_base_fee(): computation = run_general_computation(LondonVM) computation.opcodes[opcode_values.BASEFEE](computation) result = computation.stack_pop1_any() assert result == 10 ** 9 # 1 gwei " 36674,"def getproxies_environment(): """"""Return a dictionary of scheme -> proxy server URL mappings. Scan the environment for variables named _proxy; this seems to be the standard convention. If you need a different way, you can pass a proxies dictionary to the [Fancy]URLopener constructor. """""" # in order to prefer lowercase variables, process environment in # two passes: first matches any, second pass matches lowercase only # select only environment variables which end in (after making lowercase) _proxy candidate_names = [name for name in os.environ.keys() if len(name)>5 and name[-6]=='_'] # fast selection of candidates environment = [(name, os.environ[name], name.lower()) for name in candidate_names if name[-6:].lower()=='_proxy'] proxies = {} for name, value, name_lower in environment: if value and name_lower[-6:] == '_proxy': proxies[name_lower[:-6]] = value # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY # (non-all-lowercase) as it may be set from the web server by a ""Proxy:"" # header from the client # If ""proxy"" is lowercase, it will still be used thanks to the next block if 'REQUEST_METHOD' in os.environ: proxies.pop('http', None) for name, value, name_lower in environment: if name[-6:] == '_proxy': if value: proxies[name_lower[:-6]] = value else: proxies.pop(name_lower[:-6], None) return proxies ","def getproxies_environment(): """"""Return a dictionary of scheme -> proxy server URL mappings. Scan the environment for variables named _proxy; this seems to be the standard convention. If you need a different way, you can pass a proxies dictionary to the [Fancy]URLopener constructor. 
"""""" # in order to prefer lowercase variables, process environment in # two passes: first matches any, second pass matches lowercase only # select only environment variables which end in (after making lowercase) _proxy candidate_names = [name for name in os.environ.keys() if len(name)>5 and name[-6]=='_'] # fast selection of candidates environment = [(name, os.environ[name], name.lower()) for name in candidate_names if name[-6:].lower()=='_proxy'] proxies = {} for name, value, name_lower in environment: if value and name_lower[-6:] == '_proxy': proxies[name_lower[:-6]] = value # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY # (non-all-lowercase) as it may be set from the web server by a ""Proxy:"" # header from the client # If ""proxy"" is lowercase, it will still be used thanks to the next block if 'REQUEST_METHOD' in os.environ: proxies.pop('http', None) for name, value, name_lower in environment: if name[-6:] == '_proxy': if value: proxies[proxy_name] = value else: proxies.pop(name_lower[:-6], None) return proxies " 39954,"def test_messagekit_validation(capsule_side_channel): """"""Ensure that our users know exactly what's wrong with their message kit input"""""" class MessageKitsOnly(BaseSchema): mkit = fields.MessageKit() # this will raise a base64 error with pytest.raises(SpecificationError) as e: MessageKitsOnly().load({'mkit': ""I got a message for you""}) # assert that field name is in the error message assert ""Could not parse mkit"" in str(e) assert ""Incorrect padding"" in str(e) # valid base64 but invalid treasuremap b64header = base64.b64encode(MessageKit._header())[:-2].decode() with pytest.raises(SpecificationError) as e: MessageKitsOnly().load({'mkit': b64header + ""V3da==""}) assert ""Could not parse mkit"" in str(e) assert ""Not enough bytes to constitute message types"" in str(e) # test a valid messagekit valid_kit = capsule_side_channel.messages[0][0] kit_bytes = bytes(valid_kit) kit_b64 = base64.b64encode(kit_bytes) result = MessageKitsOnly().load({'mkit': kit_b64.decode()}) assert isinstance(result['mkit'], MessageKitClass) ","def test_messagekit_validation(capsule_side_channel): """"""Ensure that our users know exactly what's wrong with their message kit input"""""" class MessageKitsOnly(BaseSchema): mkit = fields.MessageKit() # this will raise a base64 error with pytest.raises(SpecificationError) as e: MessageKitsOnly().load({'mkit': ""I got a message for you""}) # assert that field name is in the error message assert ""Could not parse mkit"" in str(e) assert ""Incorrect padding"" in str(e) # valid base64 but invalid messagekit b64header = base64.b64encode(MessageKit._header())[:-2].decode() with pytest.raises(SpecificationError) as e: MessageKitsOnly().load({'mkit': b64header + ""V3da==""}) assert ""Could not parse mkit"" in str(e) assert ""Not enough bytes to constitute message types"" in str(e) # test a valid messagekit valid_kit = capsule_side_channel.messages[0][0] kit_bytes = bytes(valid_kit) kit_b64 = base64.b64encode(kit_bytes) result = MessageKitsOnly().load({'mkit': kit_b64.decode()}) assert isinstance(result['mkit'], MessageKitClass) " 24721,"def writeYAML(filename, data, **kwags): """"""Writes data to file in YAML format."""""" # set default kwags for yaml dump if ""explicit_start"" not in kwags: kwags[""explicit_start""] = True if ""explicit_end"" not in kwags: kwags[""explicit_end""] = True if ""default_flow_style"" not in kwags: kwags[""default_flow_style""] = None with open(filename, ""w"") as f: yaml.dump(data, f, **kwags) 
","def writeYAML(filename, data, **kwargs): """"""Writes data to file in YAML format."""""" # set default kwags for yaml dump if ""explicit_start"" not in kwags: kwags[""explicit_start""] = True if ""explicit_end"" not in kwags: kwags[""explicit_end""] = True if ""default_flow_style"" not in kwags: kwags[""default_flow_style""] = None with open(filename, ""w"") as f: yaml.dump(data, f, **kwags) " 10650,"def create_chromium_webdriver(extra_options=None) -> WebDriver: options = webdriver.chrome.options.Options() options.add_argument(""--headless"") options.add_argument(""--hide-scrollbars"") options.add_argument(""--force-device-scale-factor=1"") options.add_argument(""--force-color-profile=srgb"") if extra_options: for op in extra_options: assert isinstance(op,str) options.add_argument(op) return webdriver.Chrome(options=options) ","def create_chromium_webdriver(extra_options: Optional[List[str]] = None) -> WebDriver: options = webdriver.chrome.options.Options() options.add_argument(""--headless"") options.add_argument(""--hide-scrollbars"") options.add_argument(""--force-device-scale-factor=1"") options.add_argument(""--force-color-profile=srgb"") if extra_options: for op in extra_options: assert isinstance(op,str) options.add_argument(op) return webdriver.Chrome(options=options) " 36741,"def mmap(fd, offset, size): prot = MMAP.PROT_READ | MMAP.PROT_WRITE flags = MMAP.MAP_PRIVATE if size & 0xfff != 0: size = (size & ~0xfff) + 0x1000 assert size > 0 aligned_offset = offset & ~0xfff result = mmap_function(0, size, prot, flags, fd, aligned_offset) return ctypes.cast(result + offset - aligned_offset, ctypes.POINTER(ctypes.c_char)) ","def mmap(fd, offset, size): prot = MMAP.PROT_READ | MMAP.PROT_WRITE flags = MMAP.MAP_PRIVATE if size & 0xfff != 0: size = (size & ~0xfff) + 0x1000 assert size > 0 aligned_offset = offset & ~0xfff result = mmap_function(0, size + offset - aligned_offset, prot, flags, fd, aligned_offset) return ctypes.cast(result + offset - aligned_offset, ctypes.POINTER(ctypes.c_char)) " 42389,"def plot_bands(arr, title=None, cmap=""Greys_r"", figsize=(12, 12), cols=3, extent=None): """"""Plot each layer in a raster stack read from rasterio in (band, row ,col) order as a numpy array. plot_bands will create an individual plot for each band in a grid. Parameters ---------- arr: numpy array An n-dimensional numpy array in (band, row, col) order title: str or list Title of one band, or list of titles with one title per band cmap: str Colormap name (""greys"" by default) cols: int Number of columns for plot grid (default: 3) figsize: tuple - optional Figure size in inches ((12, 12) by default) extent: tuple - optional Bounding box that the data will fill: (minx, miny, maxx, maxy) Returns ---------- fig, ax or axs : figure object, axes object The figure and axes object(s) associated with the plot. Examples -------- >>>import earthpy.spatial as es ... ...im = np.random.randint(10, size=(2, 4, 5)) ...titles = [""Red Band"", ""Green Band""] ... ...# Plot all bands of a raster tif ...es.plot_bands(im, ... title=titles, ... figsize=(12,5), ... 
cols=2) """""" try: arr.ndim except AttributeError: ""Input arr should be a numpy array"" if title: if (arr.ndim == 2) and (len(title) > 1): raise ValueError(""""""You have provided more than one title for a single band array"""""") elif not (len(title) == arr.shape[0]): raise ValueError(""""""The number of plot titles should equal the number of array raster layers."""""") # If the array is 3 dimensional setup grid plotting if arr.ndim > 2 and arr.shape[0] > 1: # Calculate the total rows that will be required to plot each band plot_rows = int(np.ceil(arr.shape[0] / cols)) total_layers = arr.shape[0] # Plot all bands fig, axs = plt.subplots(plot_rows, cols, figsize=figsize) axs_ravel = axs.ravel() for ax, i in zip(axs_ravel, range(total_layers)): band = i+1 ax.imshow(bytescale(arr[i]), cmap=cmap) if title: ax.set(title=title[i]) else: ax.set(title='Band %i' %band) ax.set(xticks=[], yticks=[]) # This loop clears out the plots for axes which are empty # A matplotlib grid is always x by x. eg: 8 bands with 3 cols for ax in axs_ravel[total_layers:]: ax.set_axis_off() ax.set(xticks=[], yticks=[]) plt.tight_layout() return fig, axs elif arr.ndim == 2 or arr.shape[0] == 1: # If it's a 2 dimensional array with a 3rd dimension arr = np.squeeze(arr) fig, ax = plt.subplots(figsize=figsize) ax.imshow(bytescale(arr), cmap=cmap, extent=extent) if title: ax.set(title=title) ax.set(xticks=[], yticks=[]) return fig, ax ","def plot_bands(arr, title=None, cmap=""Greys_r"", figsize=(12, 12), cols=3, extent=None): """"""Plot each layer in a raster stack read from rasterio in (band, row ,col) order as a numpy array. plot_bands will create an individual plot for each band in a grid. Parameters ---------- arr: numpy array An n-dimensional numpy array with shape (band, row, col) title: str or list Title of one band, or list of titles with one title per band cmap: str Colormap name (""greys"" by default) cols: int Number of columns for plot grid (default: 3) figsize: tuple - optional Figure size in inches ((12, 12) by default) extent: tuple - optional Bounding box that the data will fill: (minx, miny, maxx, maxy) Returns ---------- fig, ax or axs : figure object, axes object The figure and axes object(s) associated with the plot. Examples -------- >>>import earthpy.spatial as es ... ...im = np.random.randint(10, size=(2, 4, 5)) ...titles = [""Red Band"", ""Green Band""] ... ...# Plot all bands of a raster tif ...es.plot_bands(im, ... title=titles, ... figsize=(12,5), ... cols=2) """""" try: arr.ndim except AttributeError: ""Input arr should be a numpy array"" if title: if (arr.ndim == 2) and (len(title) > 1): raise ValueError(""""""You have provided more than one title for a single band array"""""") elif not (len(title) == arr.shape[0]): raise ValueError(""""""The number of plot titles should equal the number of array raster layers."""""") # If the array is 3 dimensional setup grid plotting if arr.ndim > 2 and arr.shape[0] > 1: # Calculate the total rows that will be required to plot each band plot_rows = int(np.ceil(arr.shape[0] / cols)) total_layers = arr.shape[0] # Plot all bands fig, axs = plt.subplots(plot_rows, cols, figsize=figsize) axs_ravel = axs.ravel() for ax, i in zip(axs_ravel, range(total_layers)): band = i+1 ax.imshow(bytescale(arr[i]), cmap=cmap) if title: ax.set(title=title[i]) else: ax.set(title='Band %i' %band) ax.set(xticks=[], yticks=[]) # This loop clears out the plots for axes which are empty # A matplotlib grid is always x by x. 
eg: 8 bands with 3 cols for ax in axs_ravel[total_layers:]: ax.set_axis_off() ax.set(xticks=[], yticks=[]) plt.tight_layout() return fig, axs elif arr.ndim == 2 or arr.shape[0] == 1: # If it's a 2 dimensional array with a 3rd dimension arr = np.squeeze(arr) fig, ax = plt.subplots(figsize=figsize) ax.imshow(bytescale(arr), cmap=cmap, extent=extent) if title: ax.set(title=title) ax.set(xticks=[], yticks=[]) return fig, ax " 9013,"def command(*command_list) -> typing.Callable: """"""Decorate a function to set one or more commands that should trigger it. :param str command_list: one or more command name(s) to match This decorator can be used to add multiple commands to one callable in a single line. The resulting match object will have the command as the first group; the rest of the line, excluding leading whitespace, as the second group; and parameters 1 through 4, separated by whitespace, as groups 3-6. Example:: @command(""hello"") # If the command prefix is ""\\."", this would trigger on lines # starting with "".hello"". @command('j', 'join') # If the command prefix is ""\\."", this would trigger on lines # starting with either "".j"" or "".join"". You can use a space in the command name to implement subcommands:: @command('main sub1', 'main sub2') # For "".main sub1"", trigger.group(1) will return ""main sub1"" # For "".main sub2"", trigger.group(1) will return ""main sub2"" But in that case, be careful with the order of the names: if a more generic pattern is defined first, it will have priority over less generic patterns. So for instance, to have ``.main`` and ``.main sub`` working properly, you need to declare them like this:: @command('main sub', 'main') # This command will react properly to "".main sub"" and "".main"" Then, you can check ``trigger.group(1)`` to know if it was used as ``main sub`` or just ``main`` in your callable. If you declare them in the wrong order, ``.main`` will have priority and you won't be able to take advantage of that. Another option is to declare command with subcommands only, like this:: @command('main sub1') # this command will be triggered on .main sub1 @command('main sub2') # this other command will be triggered on .main sub2 In that case, ``.main`` won't trigger anything, and you won't have to inspect the trigger's groups to know which subcommand is triggered. .. note:: If you use this decorator multiple times, remember that the decorators are invoked in the reverse order of appearance:: # These two decorators... @command('hi') @command('hello') # ...are equivalent to this single decorator @command('hello', 'hi') See also the `Function Definitions`__ chapter from the Python documentation for more information about functions and decorators. .. __: https://docs.python.org/3/reference/compound_stmts.html#function-definitions .. note:: You can use a regular expression for the command name(s), but this is **not recommended** since version 7.1. For backward compatibility, this behavior will be kept until version 8.0. Regex patterns are confusing for your users; please don't use them in command names! If you still want to use a regex pattern, please use the :func:`rule` decorator instead. For extra arguments and subcommands based on a regex pattern, you should handle these inside your decorated function, by using the ``trigger`` object. 
"""""" def add_attribute(function): function._sopel_callable = True if not hasattr(function, ""commands""): function.commands = [] for command in command_list: if command not in function.commands: function.commands.append(command) return function return add_attribute ","def command(*command_list: str) -> typing.Callable: """"""Decorate a function to set one or more commands that should trigger it. :param str command_list: one or more command name(s) to match This decorator can be used to add multiple commands to one callable in a single line. The resulting match object will have the command as the first group; the rest of the line, excluding leading whitespace, as the second group; and parameters 1 through 4, separated by whitespace, as groups 3-6. Example:: @command(""hello"") # If the command prefix is ""\\."", this would trigger on lines # starting with "".hello"". @command('j', 'join') # If the command prefix is ""\\."", this would trigger on lines # starting with either "".j"" or "".join"". You can use a space in the command name to implement subcommands:: @command('main sub1', 'main sub2') # For "".main sub1"", trigger.group(1) will return ""main sub1"" # For "".main sub2"", trigger.group(1) will return ""main sub2"" But in that case, be careful with the order of the names: if a more generic pattern is defined first, it will have priority over less generic patterns. So for instance, to have ``.main`` and ``.main sub`` working properly, you need to declare them like this:: @command('main sub', 'main') # This command will react properly to "".main sub"" and "".main"" Then, you can check ``trigger.group(1)`` to know if it was used as ``main sub`` or just ``main`` in your callable. If you declare them in the wrong order, ``.main`` will have priority and you won't be able to take advantage of that. Another option is to declare command with subcommands only, like this:: @command('main sub1') # this command will be triggered on .main sub1 @command('main sub2') # this other command will be triggered on .main sub2 In that case, ``.main`` won't trigger anything, and you won't have to inspect the trigger's groups to know which subcommand is triggered. .. note:: If you use this decorator multiple times, remember that the decorators are invoked in the reverse order of appearance:: # These two decorators... @command('hi') @command('hello') # ...are equivalent to this single decorator @command('hello', 'hi') See also the `Function Definitions`__ chapter from the Python documentation for more information about functions and decorators. .. __: https://docs.python.org/3/reference/compound_stmts.html#function-definitions .. note:: You can use a regular expression for the command name(s), but this is **not recommended** since version 7.1. For backward compatibility, this behavior will be kept until version 8.0. Regex patterns are confusing for your users; please don't use them in command names! If you still want to use a regex pattern, please use the :func:`rule` decorator instead. For extra arguments and subcommands based on a regex pattern, you should handle these inside your decorated function, by using the ``trigger`` object. 
"""""" def add_attribute(function): function._sopel_callable = True if not hasattr(function, ""commands""): function.commands = [] for command in command_list: if command not in function.commands: function.commands.append(command) return function return add_attribute " 33342,"def version(**kwargs): """""" Returns the DeepSea version info currently installed """""" format_ = kwargs['format'] if 'format' in kwargs else 'plain' if format_ == 'json': ver = re.findall(r'(^\d+(\.\d+){1,2})', DEEPSEA_VERSION) offset = re.findall(r'\+\d+', DEEPSEA_VERSION) hash_ = re.findall(r'[\w]{7,8}$', DEEPSEA_VERSION) return {'full_version': DEEPSEA_VERSION, 'version': ver[0][0] if ver[0] else '0.0.0', 'git_offset': offset[0].lstrip('+') if offset else '0', 'git_hash': hash_[0][-7:] if hash_ else ''} return DEEPSEA_VERSION ","def version(**kwargs): """""" Returns the DeepSea version info currently installed """""" format_ = kwargs['format'] if 'format' in kwargs else 'plain' if format_ == 'json': ver = re.findall(r'(^\d+(\.\d+){1,2})', DEEPSEA_VERSION) offset = re.findall(r'\+\d+', DEEPSEA_VERSION) hash_ = re.findall(r'[\w]{7,8}$', DEEPSEA_VERSION) return {'full_version': DEEPSEA_VERSION, 'version': ver[0][0] if ver and ver[0] else '0.0.0', 'git_offset': offset[0].lstrip('+') if offset else '0', 'git_hash': hash_[0][-7:] if hash_ else ''} return DEEPSEA_VERSION " 7442,"def imsave(fname, arr, **kwargs): """"""Load a tiff image to file. Parameters ---------- fname : str or file File name or file-like-object. arr : ndarray The array to write kwargs : keyword pairs, optional Additional keyword arguments to pass through (see ``tifffile``'s ``imwrite`` function). Notes ----- Provided by the tifffile library [1]_, and supports many advanced image types including multi-page and floating point. This implementation will set `photomotric='RGB'` when writing if the first or last axis of arr has shape 3 or 4. To override this, explicitly specify the photometric kwarg. This implementation will set `planarconfig='SEPARATE'` when writing if the first axis of arr has shape 3 or 4. To override this, explicitly specify the planarconfig kwarg. References ---------- .. [1] https://pypi.org/project/tifffile/ """""" if arr.shape[0] in [3, 4]: if 'planarconfig' not in kwargs: kwargs['planarconfig'] = 'SEPARATE' rgb = True else: rgb = arr.shape[-1] in [3, 4] if rgb and 'photometric' not in kwargs: kwargs['photometric'] = 'RGB' return tifffile_imwrite(fname, arr, **kwargs) ","def imsave(fname, arr, **kwargs): """"""Load a tiff image to file. Parameters ---------- fname : str or file File name or file-like object. arr : ndarray The array to write kwargs : keyword pairs, optional Additional keyword arguments to pass through (see ``tifffile``'s ``imwrite`` function). Notes ----- Provided by the tifffile library [1]_, and supports many advanced image types including multi-page and floating point. This implementation will set `photomotric='RGB'` when writing if the first or last axis of arr has shape 3 or 4. To override this, explicitly specify the photometric kwarg. This implementation will set `planarconfig='SEPARATE'` when writing if the first axis of arr has shape 3 or 4. To override this, explicitly specify the planarconfig kwarg. References ---------- .. 
[1] https://pypi.org/project/tifffile/ """""" if arr.shape[0] in [3, 4]: if 'planarconfig' not in kwargs: kwargs['planarconfig'] = 'SEPARATE' rgb = True else: rgb = arr.shape[-1] in [3, 4] if rgb and 'photometric' not in kwargs: kwargs['photometric'] = 'RGB' return tifffile_imwrite(fname, arr, **kwargs) " 9942,"def main(): module = AnsibleModule( argument_spec=dict( server_url=dict(type='str', required=True, aliases=['url']), login_user=dict(type='str', required=True), login_password=dict(type='str', required=True, no_log=True), http_login_user=dict(type='str', required=False, default=None), http_login_password=dict(type='str', required=False, default=None, no_log=True), validate_certs=dict(type='bool', required=False, default=True), name=dict(type='str', required=True), debug_mode=dict(type='bool', default=False, required=False), gui_access=dict(default='default', required=False, choices=['default', 'internal', 'ldap', 'disabled']), users_status=dict(default='enabled', required=False, choices=['enabled', 'disabled']), rights=dict( type='list', default=[], elements='dict', required=False, opstions=dict( host_group=dict(type='str', required=True), permission=dict(type='str', required=True, choices=['denied', 'RO', 'RW']) ) ), users=dict( type='list', default=[], required=False, elements='str' ), state=dict(default=""present"", choices=['present', 'absent', 'dump']), timeout=dict(type='int', default=10) ), supports_check_mode=True ) if not HAS_ZABBIX_API: module.fail_json(msg=""Missing required zabbix-api module "" + ""(check docs or install with: "" + ""pip install zabbix-api)"") server_url = module.params['server_url'] login_user = module.params['login_user'] login_password = module.params['login_password'] http_login_user = module.params['http_login_user'] http_login_password = module.params['http_login_password'] validate_certs = module.params['validate_certs'] name = module.params['name'] debug_mode = module.params['debug_mode'] gui_access = module.params['gui_access'] users_status = module.params['users_status'] users = module.params['users'] rights = module.params['rights'] state = module.params['state'] timeout = module.params['timeout'] zbx = None # Login to zabbix try: zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, validate_certs=validate_certs) zbx.login(login_user, login_password) except ZabbixAPIException as error: module.fail_json(msg=""Failed to connect to Zabbix server: %s"" % error) # Load UserGroup module user_group = UserGroup(module, zbx) group_id = user_group.get_group_id(name) # Delete group if state == ""absent"": if not group_id: module.exit_json(changed=False, msg=""User group not found, no change: %s"" % name) user_group.delete_group(group_id) module.exit_json(changed=True, result=""Successfully deleted group %s"" % name) elif state == ""dump"": if not group_id: module.fail_json(msg='User group not found: %s' % name) module.exit_json(changed=False, group_json=user_group.dump_group(group_id)) elif state == ""present"": # Does not exists going to create it if not group_id: user_group.create_group(name, debug_mode, gui_access, users_status, rights, users) module.exit_json(changed=True, result=""Successfully created group %s"" % name) # Else we update it else: changed = user_group.update_group(group_id, name, debug_mode, gui_access, users_status, rights, users) module.exit_json(changed=changed) ","def main(): module = AnsibleModule( argument_spec=dict( server_url=dict(type='str', required=True, aliases=['url']), 
login_user=dict(type='str', required=True), login_password=dict(type='str', required=True, no_log=True), http_login_user=dict(type='str', required=False, default=None), http_login_password=dict(type='str', required=False, default=None, no_log=True), validate_certs=dict(type='bool', required=False, default=True), name=dict(type='str', required=True), debug_mode=dict(type='bool', default=False, required=False), gui_access=dict(default='default', required=False, choices=['default', 'internal', 'ldap', 'disabled']), users_status=dict(default='enabled', required=False, choices=['enabled', 'disabled']), rights=dict( type='list', default=[], elements='dict', required=False, options=dict( host_group=dict(type='str', required=True), permission=dict(type='str', required=True, choices=['denied', 'RO', 'RW']) ) ), users=dict( type='list', default=[], required=False, elements='str' ), state=dict(default=""present"", choices=['present', 'absent', 'dump']), timeout=dict(type='int', default=10) ), supports_check_mode=True ) if not HAS_ZABBIX_API: module.fail_json(msg=""Missing required zabbix-api module "" + ""(check docs or install with: "" + ""pip install zabbix-api)"") server_url = module.params['server_url'] login_user = module.params['login_user'] login_password = module.params['login_password'] http_login_user = module.params['http_login_user'] http_login_password = module.params['http_login_password'] validate_certs = module.params['validate_certs'] name = module.params['name'] debug_mode = module.params['debug_mode'] gui_access = module.params['gui_access'] users_status = module.params['users_status'] users = module.params['users'] rights = module.params['rights'] state = module.params['state'] timeout = module.params['timeout'] zbx = None # Login to zabbix try: zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, validate_certs=validate_certs) zbx.login(login_user, login_password) except ZabbixAPIException as error: module.fail_json(msg=""Failed to connect to Zabbix server: %s"" % error) # Load UserGroup module user_group = UserGroup(module, zbx) group_id = user_group.get_group_id(name) # Delete group if state == ""absent"": if not group_id: module.exit_json(changed=False, msg=""User group not found, no change: %s"" % name) user_group.delete_group(group_id) module.exit_json(changed=True, result=""Successfully deleted group %s"" % name) elif state == ""dump"": if not group_id: module.fail_json(msg='User group not found: %s' % name) module.exit_json(changed=False, group_json=user_group.dump_group(group_id)) elif state == ""present"": # Does not exists going to create it if not group_id: user_group.create_group(name, debug_mode, gui_access, users_status, rights, users) module.exit_json(changed=True, result=""Successfully created group %s"" % name) # Else we update it else: changed = user_group.update_group(group_id, name, debug_mode, gui_access, users_status, rights, users) module.exit_json(changed=changed) " 2213,"def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that # - some good features are selected. 
# - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert lars_bic.alpha_ > lars_aic.alpha_ assert len(nonzero_bic) < len(nonzero_aic) assert np.max(nonzero_bic) < diabetes.data.shape[1] # test error on unknown IC lars_broken = linear_model.LassoLarsIC('') with pytest.error(ValueError): lars_broken.fit(X, y) ","def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that # - some good features are selected. # - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert lars_bic.alpha_ > lars_aic.alpha_ assert len(nonzero_bic) < len(nonzero_aic) assert np.max(nonzero_bic) < diabetes.data.shape[1] # test error on unknown IC lars_broken = linear_model.LassoLarsIC('') with pytest.raises(ValueError): lars_broken.fit(X, y) " 8842,"def commands(*command_list): """"""Decorate a function to set one or more commands that should trigger it. :param str command_list: one or more command name(s) to match This decorator can be used to add multiple commands to one callable in a single line. The resulting match object will have the command as the first group; the rest of the line, excluding leading whitespace, as the second group; and parameters 1 through 4, separated by whitespace, as groups 3-6. Example:: @commands(""hello"") # If the command prefix is ""\\."", this would trigger on lines # starting with "".hello"". @commands('j', 'join') # If the command prefix is ""\\."", this would trigger on lines # starting with either "".j"" or "".join"". You can use a space in the command name to implement subcommands:: @commands('main sub1', 'main sub2') # For "".main sub1"", trigger.group(1) will return ""main sub1"" # For "".main sub2"", trigger.group(1) will return ""main sub2"" # For "".main"", trigger.group(1) will return ""main"" But in that case, be careful with the order of the names: if a more generic pattern is defined first, it will have priority over less generic patterns. So for instance, to have ``.main`` and ``.main sub`` working properly, you need to declare them like this:: @commands('main sub', 'main') # This command will react properly to "".main sub"" and "".main"" Then, you can check ``trigger.group(1)`` to know if it was used as ``main sub`` or just ``main`` in your callable. If you declare them in the wrong order, ``.main`` will have priority and you won't be able to take advantage of that. Another option is to declare command with subscommands only, like this:: @commands('main sub1) # this command will be triggered on .main sub1 @commands('main sub2') # this other command will be triggered on .main sub2 In that case, ``.main`` won't trigger anything, and you won't have to inspect the trigger's groups to know which subcommand is triggered. .. 
note:: If you use this decorator multiple times, remember that the decorators are invoked in the reverse order of appearance:: @commands('hi') @commands('hello') This example is equivalent to this:: @commands('hello', 'hi') See also the `Function Definitions`__ chapter from the Python documentation for more information about functions and decorators. .. __: https://docs.python.org/3/reference/compound_stmts.html#function-definitions .. note:: You can use a regular expression for the command name(s), but this is **not recommended** since version 7.1. For backward compatibility reason, this behavior will be kept until version 8.0. Regex pattern are confusing for your user, please don't use them for command name! If you still want to use a regex pattern, please use the :func:`rule` decorator instead. For extra argument and subcommands based on a regex pattern, you should handle these inside your decorated function, by using the ``trigger`` object. """""" def add_attribute(function): if not hasattr(function, ""commands""): function.commands = [] for command in command_list: if command not in function.commands: function.commands.append(command) return function return add_attribute ","def commands(*command_list): """"""Decorate a function to set one or more commands that should trigger it. :param str command_list: one or more command name(s) to match This decorator can be used to add multiple commands to one callable in a single line. The resulting match object will have the command as the first group; the rest of the line, excluding leading whitespace, as the second group; and parameters 1 through 4, separated by whitespace, as groups 3-6. Example:: @commands(""hello"") # If the command prefix is ""\\."", this would trigger on lines # starting with "".hello"". @commands('j', 'join') # If the command prefix is ""\\."", this would trigger on lines # starting with either "".j"" or "".join"". You can use a space in the command name to implement subcommands:: @commands('main sub1', 'main sub2') # For "".main sub1"", trigger.group(1) will return ""main sub1"" # For "".main sub2"", trigger.group(1) will return ""main sub2"" # For "".main"", trigger.group(1) will return ""main"" But in that case, be careful with the order of the names: if a more generic pattern is defined first, it will have priority over less generic patterns. So for instance, to have ``.main`` and ``.main sub`` working properly, you need to declare them like this:: @commands('main sub', 'main') # This command will react properly to "".main sub"" and "".main"" Then, you can check ``trigger.group(1)`` to know if it was used as ``main sub`` or just ``main`` in your callable. If you declare them in the wrong order, ``.main`` will have priority and you won't be able to take advantage of that. Another option is to declare command with subscommands only, like this:: @commands('main sub1) # this command will be triggered on .main sub1 @commands('main sub2') # this other command will be triggered on .main sub2 In that case, ``.main`` won't trigger anything, and you won't have to inspect the trigger's groups to know which subcommand is triggered. .. note:: If you use this decorator multiple times, remember that the decorators are invoked in the reverse order of appearance:: @commands('hi') @commands('hello') This example is equivalent to this:: @commands('hello', 'hi') See also the `Function Definitions`__ chapter from the Python documentation for more information about functions and decorators. .. 
__: https://docs.python.org/3/reference/compound_stmts.html#function-definitions .. note:: You can use a regular expression for the command name(s), but this is **not recommended** since version 7.1. For backward compatibility reason, this behavior will be kept until version 8.0. Regex pattern are confusing for your user, please don't use them for command name! If you still want to use a regex pattern, please use the :func:`rule` decorator instead. For extra arguments and subcommands based on a regex pattern, you should handle these inside your decorated function, by using the ``trigger`` object. """""" def add_attribute(function): if not hasattr(function, ""commands""): function.commands = [] for command in command_list: if command not in function.commands: function.commands.append(command) return function return add_attribute " 22662,"def rst2md(text, heading_levels): """"""Converts the RST text from the examples docstrigs and comments into markdown text for the Jupyter notebooks Parameters ---------- text: str RST input to be converted to MD heading_levels: dict Mapping of heading style ``(over_char, under_char)`` to heading level. Note that ``over_char`` is `None` when only underline is present. """""" # Characters recommend for use with headings # https://docutils.readthedocs.io/en/sphinx-docs/user/rst/quickstart.html#sections adornment_characters = ""=`:.'\""~^_*+#<>-"" headings = re.compile( r'(?P
<pre>\A|^[ \t]*\n)'  # Start of string or blank line above
        r'(?:(?P<over>[{0}])(?P=over)*\n[ \t]*)?'  # Over, with heading space
        r'(?P<heading>\S[^\n]*)\n'  # Heading itself
        r'(?P<under>(?(over)(?P=over)|[{0}]))(?P=under)*$'  # if over make same
        r''.format(adornment_characters),
        flags=re.M)

    text = re.sub(
        headings,
        lambda match: '{1}{0} {2}'.format(
            '#'*heading_levels[match.group('over', 'under')],
            *match.group('pre', 'heading')),
        text)

    math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^  .+)*)', flags=re.M)
    text = re.sub(math_eq,
                  lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
                      match.group(1).strip()),
                  text)
    inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
    text = re.sub(inline_math, r'$\1$', text)

    directives = ('warning', 'note')
    for directive in directives:
        directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^  .+)*)'
                                  % directive, flags=re.M)
        text = re.sub(directive_re,
                      partial(directive_fun, directive=directive), text)

    links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
    text = re.sub(links, '', text)

    refs = re.compile(r':ref:`')
    text = re.sub(refs, '`', text)

    contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
                          flags=re.M)
    text = re.sub(contents, '', text)

    images = re.compile(
        r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
        flags=re.M)
    text = re.sub(
        images, lambda match: '![{1}]({0})\n'.format(
            match.group(1).strip(), (match.group(2) or '').strip()), text)

    return text

","def rst2md(text, heading_levels):
    """"""Converts the RST text from the examples docstrings and comments
    into markdown text for the Jupyter notebooks

    Parameters
    ----------
    text: str
        RST input to be converted to MD
    heading_levels: dict
        Mapping of heading style ``(over_char, under_char)`` to heading level.
        Note that ``over_char`` is `None` when only underline is present.
    """"""

    # Characters recommend for use with headings
    # https://docutils.readthedocs.io/en/sphinx-docs/user/rst/quickstart.html#sections
    adornment_characters = ""=`:.'\""~^_*+#<>-""
    headings = re.compile(
        r'(?P<pre>\A|^[ \t]*\n)'  # Start of string or blank line above
        r'(?:(?P<over>[{0}])(?P=over)*\n[ \t]*)?'  # Over, with heading space
        r'(?P<heading>\S[^\n]*)\n'  # Heading itself
        r'(?P<under>(?(over)(?P=over)|[{0}]))(?P=under)*$'  # if over make same
        r''.format(adornment_characters),
        flags=re.M)

    text = re.sub(
        headings,
        lambda match: '{1}{0} {2}'.format(
            '#'*heading_levels[match.group('over', 'under')],
            *match.group('pre', 'heading')),
        text)

    math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^  .+)*)', flags=re.M)
    text = re.sub(math_eq,
                  lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
                      match.group(1).strip()),
                  text)
    inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
    text = re.sub(inline_math, r'$\1$', text)

    directives = ('warning', 'note')
    for directive in directives:
        directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^  .+)*)'
                                  % directive, flags=re.M)
        text = re.sub(directive_re,
                      partial(directive_fun, directive=directive), text)

    links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
    text = re.sub(links, '', text)

    refs = re.compile(r':ref:`')
    text = re.sub(refs, '`', text)

    contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
                          flags=re.M)
    text = re.sub(contents, '', text)

    images = re.compile(
        r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
        flags=re.M)
    text = re.sub(
        images, lambda match: '![{1}]({0})\n'.format(
            match.group(1).strip(), (match.group(2) or '').strip()), text)

    return text

"
49114,"def nonlinsolve(system, *symbols):
    r""""""
    Solve system of $N$ nonlinear equations with $M$ variables, which means both
    under and overdetermined systems are supported. Positive dimensional
    system is also supported (A system with infinitely many solutions is said
    to be positive-dimensional). In a positive dimensional system the solution will
    be dependent on at least one symbol. Returns both real solution
    and complex solution (if they exist). The possible number of solutions
    is zero, one or infinite.

    Parameters
    ==========

    system : list of equations
        The target system of equations
    symbols : list of Symbols
        symbols should be given as a sequence eg. list

    Returns
    =======

    A :class:`~.FiniteSet` of ordered tuple of values of `symbols` for which the `system`
    has solution. Order of values in the tuple is same as symbols present in
    the parameter `symbols`.

    Please note that general :class:`~.FiniteSet` is unordered, the solution
    returned here is not simply a :class:`~.FiniteSet` of solutions, rather it
    is a :class:`~.FiniteSet` of ordered tuple, i.e. the first and only
    argument to :class:`~.FiniteSet` is a tuple of solutions, which is
    ordered, and, hence ,the returned solution is ordered.

    Also note that solution could also have been returned as an ordered tuple,
    FiniteSet is just a wrapper ``{}`` around the tuple. It has no other
    significance except for the fact it is just used to maintain a consistent
    output format throughout the solveset.

    For the given set of equations, the respective input types
    are given below:

    .. math:: xy - 1 = 0
    .. math:: 4x^2 + y^2 - 5 = 0

    ::

       system  = [x*y - 1, 4*x**2 + y**2 - 5]
       symbols = [x, y]

    Raises
    ======

    ValueError
        The input is not valid.
        The symbols are not given.
    AttributeError
        The input symbols are not `Symbol` type.

    Examples
    ========

    >>> from sympy import symbols, nonlinsolve
    >>> x, y, z = symbols('x, y, z', real=True)
    >>> nonlinsolve([x*y - 1, 4*x**2 + y**2 - 5], [x, y])
    {(-1, -1), (-1/2, -2), (1/2, 2), (1, 1)}

    1. Positive dimensional system and complements:

    >>> from sympy import pprint
    >>> from sympy.polys.polytools import is_zero_dimensional
    >>> a, b, c, d = symbols('a, b, c, d', extended_real=True)
    >>> eq1 =  a + b + c + d
    >>> eq2 = a*b + b*c + c*d + d*a
    >>> eq3 = a*b*c + b*c*d + c*d*a + d*a*b
    >>> eq4 = a*b*c*d - 1
    >>> system = [eq1, eq2, eq3, eq4]
    >>> is_zero_dimensional(system)
    False
    >>> pprint(nonlinsolve(system, [a, b, c, d]), use_unicode=False)
      -1       1               1      -1
    {(---, -d, -, {d} \ {0}), (-, -d, ---, {d} \ {0})}
       d       d               d       d
    >>> nonlinsolve([(x+y)**2 - 4, x + y - 2], [x, y])
    {(2 - y, y)}

    2. If some of the equations are non-polynomial then `nonlinsolve`
    will call the ``substitution`` function and return real and complex solutions,
    if present.

    >>> from sympy import exp, sin
    >>> nonlinsolve([exp(x) - sin(y), y**2 - 4], [x, y])
    {(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
     (ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}

    3. If the system is non-linear polynomial and zero-dimensional then it
    returns all solutions (real and complex, if present) using
    :func:`~.solve_poly_system`:

    >>> from sympy import sqrt
    >>> nonlinsolve([x**2 - 2*y**2 -2, x*y - 2], [x, y])
    {(-2, -1), (2, 1), (-sqrt(2)*I, sqrt(2)*I), (sqrt(2)*I, -sqrt(2)*I)}

    4. ``nonlinsolve`` can solve some linear (zero or positive dimensional)
    system (because it uses the :func:`sympy.polys.polytools.groebner` function to get the
    groebner basis and then uses the ``substitution`` function basis as the
    new `system`). But it is not recommended to solve linear system using
    ``nonlinsolve``, because :func:`~.linsolve` is better for general linear systems.

    >>> nonlinsolve([x + 2*y -z - 3, x - y - 4*z + 9, y + z - 4], [x, y, z])
    {(3*z - 5, 4 - z, z)}

    5. System having polynomial equations and only real solution is
    solved using :func:`~.solve_poly_system`:

    >>> e1 = sqrt(x**2 + y**2) - 10
    >>> e2 = sqrt(y**2 + (-x + 10)**2) - 3
    >>> nonlinsolve((e1, e2), (x, y))
    {(191/20, -3*sqrt(391)/20), (191/20, 3*sqrt(391)/20)}
    >>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [x, y])
    {(1, 2), (1 - sqrt(5), 2 + sqrt(5)), (1 + sqrt(5), 2 - sqrt(5))}
    >>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [y, x])
    {(2, 1), (2 - sqrt(5), 1 + sqrt(5)), (2 + sqrt(5), 1 - sqrt(5))}

    6. It is better to use symbols instead of trigonometric functions or
    :class:`~.Function`. For example, replace $\sin(x)$ with a symbol, replace
    $f(x)$ with a symbol and so on. Get a solution from ``nonlinsolve`` and then
    use :func:`~.solveset` to get the value of $x$.

    How nonlinsolve is better than old solver ``_solve_system`` :
    =============================================================

    1. A positive dimensional system solver: nonlinsolve can return
    solution for positive dimensional system. It finds the
    Groebner Basis of the positive dimensional system(calling it as
    basis) then we can start solving equation(having least number of
    variable first in the basis) using solveset and substituting that
    solved solutions into other equation(of basis) to get solution in
    terms of minimum variables. Here the important thing is how we
    are substituting the known values and in which equations.

    2. Real and complex solutions: nonlinsolve returns both real
    and complex solution. If all the equations in the system are polynomial
    then using :func:`~.solve_poly_system` both real and complex solution is returned.
    If not all of the equations in the system are polynomial, then the
    ``substitution`` method is used with the polynomial and non-polynomial equation(s)
    to solve for unsolved variables. Here to solve for particular variable
    solveset_real and solveset_complex is used. For both real and complex
    solution ``_solve_using_know_values`` is used inside ``substitution``
    (``substitution`` will be called when any non-polynomial equation is present).
    If a solution is valid its general solution is added to the final result.

    3. :class:`~.Complement` and :class:`~.Intersection` will be added:
    nonlinsolve maintains dict for complements and intersections. If solveset
    finds complements and/or intersections with any interval or set during the
    execution of the ``substitution`` function, then the complement and/or
    intersection for that variable is added before returning final solution.

    """"""
    from sympy.polys.polytools import is_zero_dimensional

    if not system:
        return S.EmptySet

    if not symbols:
        msg = ('Symbols must be given, for which solution of the '
               'system is to be found.')
        raise ValueError(filldedent(msg))

    if hasattr(symbols[0], '__iter__'):
        symbols = symbols[0]

    if not is_sequence(symbols) or not symbols:
        msg = ('Symbols must be given, for which solution of the '
               'system is to be found.')
        raise IndexError(filldedent(msg))

    system, symbols, swap = recast_to_symbols(system, symbols)
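    # recast_to_symbols swaps any non-Symbol unknowns for placeholder symbols;
    # if a swap happened, solve the recast system and use xreplace to map the
    # original objects back into each solution tuple.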
    if swap:
        soln = nonlinsolve(system, symbols)
        return FiniteSet(*[tuple(i.xreplace(swap) for i in s) for s in soln])

    if len(system) == 1 and len(symbols) == 1:
        return _solveset_work(system, symbols)

    # main code of def nonlinsolve() starts from here
    polys, polys_expr, nonpolys, denominators = _separate_poly_nonpoly(
        system, symbols)

    if len(symbols) == len(polys):
        # If all the equations in the system are poly
        if is_zero_dimensional(polys, symbols):
            # finite number of soln (Zero dimensional system)
            try:
                return _handle_zero_dimensional(polys, symbols, system)
            except NotImplementedError:
                # Right now it doesn't fail for any polynomial system of
                # equation. If `solve_poly_system` fails then `substitution`
                # method will handle it.
                result = substitution(
                    polys_expr, symbols, exclude=denominators)
                return result

        # positive dimensional system
        res = _handle_positive_dimensional(polys, symbols, denominators)
        if res is S.EmptySet and any(not p.domain.is_Exact for p in polys):
            raise NotImplementedError(""Equation not in exact domain. Try converting to rational"")
        else:
            return res

    else:
        # If all the equations are not polynomial.
        # Use `substitution` method for the system
        result = substitution(
            polys_expr + nonpolys, symbols, exclude=denominators)
    valid_solns = []
    if isinstance(result, FiniteSet):
        for soln in result.args:
            # the second of the following conditions is a hack
            # it is required because checksol can fail with cases like sin(n*pi)
            # so don't attempt to call checksol if the solution contains extra symbols
            if any(isinstance(v, Set) for v in soln) or soln.free_symbols.difference(symbols) \
                    or checksol(system, dict(zip(symbols, soln))) != False:
                valid_solns.append(soln)
        if len(valid_solns) > 0:
            return FiniteSet(*valid_solns)
        else:
            return S.EmptySet
    return result
","def nonlinsolve(system, *symbols):
    r""""""
    Solve system of $N$ nonlinear equations with $M$ variables, which means both
    under and overdetermined systems are supported. Positive dimensional
    system is also supported (A system with infinitely many solutions is said
    to be positive-dimensional). In a positive dimensional system the solution will
    be dependent on at least one symbol. Returns both real solution
    and complex solution (if they exist). The possible number of solutions
    is zero, one or infinite.

    Parameters
    ==========

    system : list of equations
        The target system of equations
    symbols : list of Symbols
        Symbols should be given as a sequence, e.g. a list.

    Returns
    =======

    A :class:`~.FiniteSet` of ordered tuples of values of `symbols` for which the `system`
    has a solution. The order of values in each tuple is the same as the order of the
    symbols in the parameter `symbols`.

    Please note that general :class:`~.FiniteSet` is unordered, the solution
    returned here is not simply a :class:`~.FiniteSet` of solutions, rather it
    is a :class:`~.FiniteSet` of ordered tuple, i.e. the first and only
    argument to :class:`~.FiniteSet` is a tuple of solutions, which is
    ordered, and hence the returned solution is ordered.

    Also note that solution could also have been returned as an ordered tuple,
    FiniteSet is just a wrapper ``{}`` around the tuple. It has no other
    significance except for the fact it is just used to maintain a consistent
    output format throughout the solveset.

    For the given set of equations, the respective input types
    are given below:

    .. math:: xy - 1 = 0
    .. math:: 4x^2 + y^2 - 5 = 0

    ::

       system  = [x*y - 1, 4*x**2 + y**2 - 5]
       symbols = [x, y]

    Raises
    ======

    ValueError
        The input is not valid.
        The symbols are not given.
    AttributeError
        The input symbols are not `Symbol` type.

    Examples
    ========

    >>> from sympy import symbols, nonlinsolve
    >>> x, y, z = symbols('x, y, z', real=True)
    >>> nonlinsolve([x*y - 1, 4*x**2 + y**2 - 5], [x, y])
    {(-1, -1), (-1/2, -2), (1/2, 2), (1, 1)}

    1. Positive dimensional system and complements:

    >>> from sympy import pprint
    >>> from sympy.polys.polytools import is_zero_dimensional
    >>> a, b, c, d = symbols('a, b, c, d', extended_real=True)
    >>> eq1 =  a + b + c + d
    >>> eq2 = a*b + b*c + c*d + d*a
    >>> eq3 = a*b*c + b*c*d + c*d*a + d*a*b
    >>> eq4 = a*b*c*d - 1
    >>> system = [eq1, eq2, eq3, eq4]
    >>> is_zero_dimensional(system)
    False
    >>> pprint(nonlinsolve(system, [a, b, c, d]), use_unicode=False)
      -1       1               1      -1
    {(---, -d, -, {d} \ {0}), (-, -d, ---, {d} \ {0})}
       d       d               d       d
    >>> nonlinsolve([(x+y)**2 - 4, x + y - 2], [x, y])
    {(2 - y, y)}

    2. If some of the equations are non-polynomial then `nonlinsolve`
    will call the ``substitution`` function and return real and complex solutions,
    if present.

    >>> from sympy import exp, sin
    >>> nonlinsolve([exp(x) - sin(y), y**2 - 4], [x, y])
    {(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
     (ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}

    3. If the system is non-linear polynomial and zero-dimensional then it
    returns all solutions (real and complex, if present) using
    :func:`~.solve_poly_system`:

    >>> from sympy import sqrt
    >>> nonlinsolve([x**2 - 2*y**2 -2, x*y - 2], [x, y])
    {(-2, -1), (2, 1), (-sqrt(2)*I, sqrt(2)*I), (sqrt(2)*I, -sqrt(2)*I)}

    4. ``nonlinsolve`` can solve some linear (zero or positive dimensional)
    system (because it uses the :func:`sympy.polys.polytools.groebner` function to get the
    groebner basis and then uses the ``substitution`` function basis as the
    new `system`). But it is not recommended to solve linear system using
    ``nonlinsolve``, because :func:`~.linsolve` is better for general linear systems.

    >>> nonlinsolve([x + 2*y -z - 3, x - y - 4*z + 9, y + z - 4], [x, y, z])
    {(3*z - 5, 4 - z, z)}

    5. System having polynomial equations and only real solution is
    solved using :func:`~.solve_poly_system`:

    >>> e1 = sqrt(x**2 + y**2) - 10
    >>> e2 = sqrt(y**2 + (-x + 10)**2) - 3
    >>> nonlinsolve((e1, e2), (x, y))
    {(191/20, -3*sqrt(391)/20), (191/20, 3*sqrt(391)/20)}
    >>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [x, y])
    {(1, 2), (1 - sqrt(5), 2 + sqrt(5)), (1 + sqrt(5), 2 - sqrt(5))}
    >>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [y, x])
    {(2, 1), (2 - sqrt(5), 1 + sqrt(5)), (2 + sqrt(5), 1 - sqrt(5))}

    6. It is better to use symbols instead of trigonometric functions or
    :class:`~.Function`. For example, replace $\sin(x)$ with a symbol, replace
    $f(x)$ with a symbol and so on. Get a solution from ``nonlinsolve`` and then
    use :func:`~.solveset` to get the value of $x$.

    How nonlinsolve is better than old solver ``_solve_system`` :
    =============================================================

    1. A positive dimensional system solver: nonlinsolve can return
    solution for positive dimensional system. It finds the
    Groebner Basis of the positive dimensional system(calling it as
    basis) then we can start solving equation(having least number of
    variable first in the basis) using solveset and substituting that
    solved solutions into other equation(of basis) to get solution in
    terms of minimum variables. Here the important thing is how we
    are substituting the known values and in which equations.

    2. Real and complex solutions: nonlinsolve returns both real
    and complex solution. If all the equations in the system are polynomial
    then using :func:`~.solve_poly_system` both real and complex solution is returned.
    If not all of the equations in the system are polynomial, then the
    ``substitution`` method is used with the polynomial and non-polynomial equation(s)
    to solve for unsolved variables. Here to solve for particular variable
    solveset_real and solveset_complex is used. For both real and complex
    solution ``_solve_using_know_values`` is used inside ``substitution``
    (``substitution`` will be called when any non-polynomial equation is present).
    If a solution is valid its general solution is added to the final result.

    3. :class:`~.Complement` and :class:`~.Intersection` will be added:
    nonlinsolve maintains dict for complements and intersections. If solveset
    finds complements and/or intersections with any interval or set during the
    execution of the ``substitution`` function, then the complement and/or
    intersection for that variable is added before returning final solution.

    """"""
    from sympy.polys.polytools import is_zero_dimensional

    if not system:
        return S.EmptySet

    if not symbols:
        msg = ('Symbols must be given, for which solution of the '
               'system is to be found.')
        raise ValueError(filldedent(msg))

    if hasattr(symbols[0], '__iter__'):
        symbols = symbols[0]

    if not is_sequence(symbols) or not symbols:
        msg = ('Symbols must be given, for which solution of the '
               'system is to be found.')
        raise IndexError(filldedent(msg))

    system, symbols, swap = recast_to_symbols(system, symbols)
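    # recast_to_symbols swaps any non-Symbol unknowns for placeholder symbols;
    # if a swap happened, solve the recast system and use xreplace to map the
    # original objects back into each solution tuple.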
    if swap:
        soln = nonlinsolve(system, symbols)
        return FiniteSet(*[tuple(i.xreplace(swap) for i in s) for s in soln])

    if len(system) == 1 and len(symbols) == 1:
        return _solveset_work(system, symbols)

    # main code of def nonlinsolve() starts from here
    polys, polys_expr, nonpolys, denominators = _separate_poly_nonpoly(
        system, symbols)

    if len(symbols) == len(polys):
        # If all the equations in the system are poly
        if is_zero_dimensional(polys, symbols):
            # finite number of soln (Zero dimensional system)
            try:
                return _handle_zero_dimensional(polys, symbols, system)
            except NotImplementedError:
                # Right now it doesn't fail for any polynomial system of
                # equation. If `solve_poly_system` fails then `substitution`
                # method will handle it.
                result = substitution(
                    polys_expr, symbols, exclude=denominators)
                return result

        # positive dimensional system
        res = _handle_positive_dimensional(polys, symbols, denominators)
        if res is S.EmptySet and any(not p.domain.is_Exact for p in polys):
            raise NotImplementedError(""Equation not in exact domain. Try converting to rational"")
        else:
            return res

    else:
        # If all the equations are not polynomial.
        # Use `substitution` method for the system
        result = substitution(
            polys_expr + nonpolys, symbols, exclude=denominators)
    if isinstance(result, FiniteSet):
        valid_solns = []
        for soln in result.args:
            # the second of the following conditions is a hack
            # it is required because checksol can fail with cases like sin(n*pi)
            # so don't attempt to call checksol if the solution contains extra symbols
            if any(isinstance(v, Set) for v in soln) or soln.free_symbols.difference(symbols) \
                    or checksol(system, dict(zip(symbols, soln))) != False:
                valid_solns.append(soln)
        if len(valid_solns) > 0:
            return FiniteSet(*valid_solns)
        else:
            return S.EmptySet
    return result
"
56253,"def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    log.info('OpenVINO Inference Engine')
    log.info('\tbuild: {}'.format(get_version()))
    ie = IECore()

    plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)

    model = MonoDepthModel(ie, args.model)
    log.info('Reading model {}'.format(args.model))
    log_blobs_info(model)

    pipeline = AsyncPipeline(ie, model, plugin_config, device=args.device, max_num_requests=args.num_infer_requests)

    log.info('The model {} is loaded to {}'.format(args.model, args.device))
    log_runtime_settings(pipeline.exec_net, args.device)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError(""Can't read an image from the input"")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(args.utilization_monitors, 55,
                                               (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                                                         cap.fps(), output_resolution):
                    raise RuntimeError(""Can't open video writer"")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
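            # Re-raise the first exception raised inside an inference callback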
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            depth_map, frame_meta = results
            depth_map = apply_color_map(depth_map)

            start_time = frame_meta['start_time']
            presenter.drawGraphs(depth_map)
            metrics.update(start_time, depth_map)

            if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
                video_writer.write(depth_map)
            next_frame_id_to_show += 1


            if not args.no_show:
                cv2.imshow(DEMO_NAME, depth_map)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        depth_map, frame_meta = results
        depth_map = apply_color_map(depth_map)

        start_time = frame_meta['start_time']

        presenter.drawGraphs(depth_map)
        metrics.update(start_time, depth_map)

        if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
            video_writer.write(depth_map)

        if not args.no_show:
            cv2.imshow(DEMO_NAME, depth_map)
            key = cv2.waitKey(1)

    metrics.log_total()
    print(presenter.reportMeans())

","def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    log.info('OpenVINO Inference Engine')
    log.info('\tbuild: {}'.format(get_version()))
    ie = IECore()

    plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)

    model = MonoDepthModel(ie, args.model)
    log.info('Reading model {}'.format(args.model))
    log_blobs_info(model)

    pipeline = AsyncPipeline(ie, model, plugin_config, device=args.device, max_num_requests=args.num_infer_requests)

    log.info('The model {} is loaded to {}'.format(args.model, args.device))
    log_runtime_settings(pipeline.exec_net, args.device)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError(""Can't read an image from the input"")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(args.utilization_monitors, 55,
                                               (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                                                         cap.fps(), output_resolution):
                    raise RuntimeError(""Can't open video writer"")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
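            # Re-raise the first exception raised inside an inference callback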
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            depth_map, frame_meta = results
            depth_map = apply_color_map(depth_map)

            start_time = frame_meta['start_time']
            presenter.drawGraphs(depth_map)
            metrics.update(start_time, depth_map)

            if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
                video_writer.write(depth_map)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow(DEMO_NAME, depth_map)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        depth_map, frame_meta = results
        depth_map = apply_color_map(depth_map)

        start_time = frame_meta['start_time']

        presenter.drawGraphs(depth_map)
        metrics.update(start_time, depth_map)

        if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
            video_writer.write(depth_map)

        if not args.no_show:
            cv2.imshow(DEMO_NAME, depth_map)
            key = cv2.waitKey(1)

    metrics.log_total()
    print(presenter.reportMeans())

"
38449,"def dump_mortar_grid_to_file(gb, e, d, fn, max_1_grid_per_dim=False, dfn=False):
    """"""
    Dump a PorePy mortar grid to a file. The file format of the dumped file
    can be read by pp.read_grid_from_file. It is also compatible
    with a MRST (https://www.sintef.no/projectweb/mrst/) or an
    OPM unstructured grid ( https://opm-project.org/ ).

    Both the mortar grid and the mappings to the Primary and Secondary will
    be dumpt to file. The mappings will have the suffix ""_mapping""

    Parameters:
    gb  (GridBucket): The grid bucket associated with the mortar grid.
    e (Tuple(Grid, Grid)): The edge in gb associated with the mortar grid.
    d (Dict): The dictionary of the edge
    fn (String): The file name. The file name will be appended with an index.
        This will be passed to open() using 'w'
    max_1_grid_per_dim (bool): OPTIONAL, defaults to False. If True, the
        grid dimension will be used to index the file name. If False,
        the edge_number will be used.
    dfn (bool) OPTIONAL, defaults to False. If the gb is a DFN, set this value to
        True

    Returns:
    None
    """"""

    # Get the index of the grid and append it to file
    if max_1_grid_per_dim:
        grid_id = str(d[""mortar_grid""].dim)
    else:
        grid_id = str(d[""edge_number""])
    grid_name = append_id_(fn, ""mortar_"" + grid_id)
    mg = d[""mortar_grid""]
    mg.idx = d[""edge_number""]
    # We need to obtain the actual mortar grids from the side grids
    mortar_grids = []
    for sg in mg.side_grids.values():
        mortar_grids.append(sg)
    # We store both sides of the grids as one grid.
    mortar_grid = pp.utils.grid_utils.merge_grids(mortar_grids)
    mortar_grid.idx = mg.idx
    dim = mortar_grid.dim

    # We need to set the maximum dimension = maximum dimension of the gb,
    # in order to export the correct number of coordinates of vector-valued
    # geometry variables in the grid. If dfn, add 1 to the dimension.
    mortar_grid.dim = gb.dim_max() + dfn

    # Dump the mortar grids (or side grids in pp context).
    dump_grid_to_file(mortar_grid, grid_name)
    mortar_grid.dim = dim

    # Now, dump the mortar projections to the primary and secondary
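    # gl and gh are the lower- and higher-dimensional neighbor grids of the edge;
    # the higher-dimensional grid acts as primary and the lower-dimensional as secondary.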
    gl, gh = gb.nodes_of_edge(e)

    name = append_id_(fn, ""mapping_"" + grid_id)

    gh_to_mg = mg.mortar_to_primary_int()
    gl_to_mg = mg.mortar_to_secondary_int()

    dump_mortar_projections_to_file(gh, mg, gh_to_mg, name)
    dump_mortar_projections_to_file(gl, mg, gl_to_mg, name, ""a"")

","def dump_mortar_grid_to_file(gb, e, d, fn, max_1_grid_per_dim=False, dfn=False):
    """"""
    Dump a PorePy mortar grid to a file. The file format of the dumped file
    can be read by pp.read_grid_from_file. It is also compatible
    with a MRST (https://www.sintef.no/projectweb/mrst/) or an
    OPM unstructured grid ( https://opm-project.org/ ).

    Both the mortar grid and the mappings to the Primary and Secondary will
    be dumped to file. The mappings will have the suffix ""_mapping""

    Parameters:
    gb  (GridBucket): The grid bucket associated with the mortar grid.
    e (Tuple(Grid, Grid)): The edge in gb associated with the mortar grid.
    d (Dict): The dictionary of the edge
    fn (String): The file name. The file name will be appended with an index.
        This will be passed to open() using 'w'
    max_1_grid_per_dim (bool): OPTIONAL, defaults to False. If True, the
        grid dimension will be used to index the file name. If False,
        the edge_number will be used.
    dfn (bool) OPTIONAL, defaults to False. If the gb is a DFN, set this value to
        True

    Returns:
    None
    """"""

    # Get the index of the grid and append it to file
    if max_1_grid_per_dim:
        grid_id = str(d[""mortar_grid""].dim)
    else:
        grid_id = str(d[""edge_number""])
    grid_name = append_id_(fn, ""mortar_"" + grid_id)
    mg = d[""mortar_grid""]
    mg.idx = d[""edge_number""]
    # We need to obtain the actual mortar grids from the side grids
    mortar_grids = []
    for sg in mg.side_grids.values():
        mortar_grids.append(sg)
    # We store both sides of the grids as one grid.
    mortar_grid = pp.utils.grid_utils.merge_grids(mortar_grids)
    mortar_grid.idx = mg.idx
    dim = mortar_grid.dim

    # We need to set the maximum dimension = maximum dimension of the gb,
    # in order to export the correct number of coordinates of vector-valued
    # geometry variables in the grid. If dfn, add 1 to the dimension.
    mortar_grid.dim = gb.dim_max() + dfn

    # Dump the mortar grids (or side grids in pp context).
    dump_grid_to_file(mortar_grid, grid_name)
    mortar_grid.dim = dim

    # Now, dump the mortar projections to the primary and secondary
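    # gl and gh are the lower- and higher-dimensional neighbor grids of the edge;
    # the higher-dimensional grid acts as primary and the lower-dimensional as secondary.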
    gl, gh = gb.nodes_of_edge(e)

    name = append_id_(fn, ""mapping_"" + grid_id)

    gh_to_mg = mg.mortar_to_primary_int()
    gl_to_mg = mg.mortar_to_secondary_int()

    dump_mortar_projections_to_file(gh, mg, gh_to_mg, name)
    dump_mortar_projections_to_file(gl, mg, gl_to_mg, name, ""a"")

"
20025,"def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
    """""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
    img1: 1st image to be fused
    img2: 2nd image to be fused
    wvs1: list of wavelengths representing bands in img1
    wvs2: list of wavelengths representing bands in img2
    array_type: (optional) description of the fused array
    filename: (optional) desired filename of the fused array

    :param img1: np.ndarray
    :param img2: np.ndarray
    :param wvs1: list
    :param wvs2: list
    :param array_type: str
    :param filename: str
    :return: fused_array (a Spectral_data instance)
    """"""

    if len(img1.shape) == 2:
        img1 = np.expand_dims(img1,axis=2)
    r1, c1, b1 = img1.shape

    if len(img2.shape) == 2:
        img2 = np.expand_dims(img2,axis=2)
    r2, c2, b2 = img2.shape
    if (r1,c1) != (r2,c2):
        fatal_error(""Input images should have the same image size"")

    array_data  = np.concatenate((img1, img2), axis=2)

    # sort all wavelengths
    wavelengths = np.array(wvs1 + wvs2)
    ind = np.argsort(wavelengths)
    wavelengths = wavelengths[ind]

    wavelength_dict = dict()
    for (idx, wv) in enumerate(wavelengths):
        wavelength_dict[wv] = float(idx)

    # sort array_data based on wavelengths
    array_data = array_data[:,:,ind]
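    # scale pixel values (assumed to be 8-bit) to the [0, 1] range as float32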
    array_data = (array_data / 255).astype(np.float32)

    max_pixel = float(np.amax(array_data))
    min_pixel = float(np.amin(array_data))

    d_type = array_data.dtype

    r, c, b = array_data.shape

    fused_array = Spectral_data(array_data=array_data,
                                   max_wavelength=float(max(wavelengths)),
                                   min_wavelength=float(min(wavelengths)),
                                   max_value=max_pixel, min_value=min_pixel,
                                   d_type=d_type,
                                   wavelength_dict=wavelength_dict, samples=int(r * c),
                                   lines=int(b), interleave=""bil"",
                                   wavelength_units=""nm"", array_type=array_type,
                                   pseudo_rgb=None, filename=filename, default_bands=None)

    # Make pseudo-rgb image and replace it inside the class instance object
    pseudo_rgb = _make_pseudo_rgb(fused_array)
    fused_array.pseudo_rgb = pseudo_rgb

    _debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + ""_fused_pseudo_rgb.png""))

    return fused_array

","def image_fusion(img1, img2, wvs1, wvs2, array_type=None, filename=None):
    """""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
    img1: 1st image to be fused
    img2: 2nd image to be fused
    wvs1: list of wavelengths representing bands in img1
    wvs2: list of wavelengths representing bands in img2
    array_type: (optional) description of the fused array
    filename: (optional) desired filename of the fused array

    :param img1: np.ndarray
    :param img2: np.ndarray
    :param wvs1: list
    :param wvs2: list
    :param array_type: str
    :param filename: str
    :return: fused_array (a Spectral_data instance)
    """"""

    if len(img1.shape) == 2:
        img1 = np.expand_dims(img1,axis=2)
    r1, c1, b1 = img1.shape

    if len(img2.shape) == 2:
        img2 = np.expand_dims(img2,axis=2)
    r2, c2, b2 = img2.shape
    if (r1,c1) != (r2,c2):
        fatal_error(""Input images should have the same image size"")

    array_data  = np.concatenate((img1, img2), axis=2)

    # sort all wavelengths
    wavelengths = np.array(wvs1 + wvs2)
    ind = np.argsort(wavelengths)
    wavelengths = wavelengths[ind]

    wavelength_dict = dict()
    for (idx, wv) in enumerate(wavelengths):
        wavelength_dict[wv] = float(idx)

    # sort array_data based on wavelengths
    array_data = array_data[:,:,ind]
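    # scale pixel values (assumed to be 8-bit) to the [0, 1] range as float32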
    array_data = (array_data / 255).astype(np.float32)

    max_pixel = float(np.amax(array_data))
    min_pixel = float(np.amin(array_data))

    d_type = array_data.dtype

    r, c, b = array_data.shape

    fused_array = Spectral_data(array_data=array_data,
                                   max_wavelength=float(max(wavelengths)),
                                   min_wavelength=float(min(wavelengths)),
                                   max_value=max_pixel, min_value=min_pixel,
                                   d_type=d_type,
                                   wavelength_dict=wavelength_dict, samples=int(r * c),
                                   lines=int(b), interleave=""bil"",
                                   wavelength_units=""nm"", array_type=array_type,
                                   pseudo_rgb=None, filename=filename, default_bands=None)

    # Make pseudo-rgb image and replace it inside the class instance object
    pseudo_rgb = _make_pseudo_rgb(fused_array)
    fused_array.pseudo_rgb = pseudo_rgb

    _debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + ""_fused_pseudo_rgb.png""))

    return fused_array

"
1030,"def test_parallel_call_cached_function_defined_in_jupyter(tmpdir):
    # Calling an interactively defined memory.cache()'d function inside a
    # Parallel call used to clear the existing cache related to the said
    # function (https://github.com/joblib/joblib/issues/1035)

    # This test checks that this is no longer the case.

    # TODO: test that the cache related to the function cache persists across
    # ipython sessions (provided that no code change were made to the
    # function's source)?

    # The first part of the test makes the necessary low-level calls to emulate
    # the definition of a function in a jupyter notebook cell. Joblib has
    # some custom code to treat functions defined specifically in jupyter
    # notebooks/ipython session -- we want to test this code, which requires
    # the emulation to be rigorous.
    ipython_cell_source = '''
    def f(x):
        return x
    '''

    exec(
        compile(
            textwrap.dedent(ipython_cell_source),
            filename='<ipython-input-1-000000000000>',
            mode='exec'
        )
    )
    # f is now accessible in the locals mapping - but for some unknown reason,
    # f = locals()['f'] throws a KeyError at runtime, so we need to bind
    # locals()['f'] to a different name in the local namespace
    aliased_f = locals()['f']
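    # make f look as if it was defined in the top-level interactive (__main__) namespace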
    aliased_f.__module__ = ""__main__""

    # Preliminary sanity checks, and tests checking that joblib properly
    # identified f as an interactive function defined in a jupyter notebook
    assert aliased_f(1) == 1
    assert aliased_f.__code__.co_filename.startswith('<ipython-input')
"
,"def check_fields_command(args: Dict[str, Any]) -> CommandResults:
    """"""
    Args:
        args(dict): args from demisto

    Returns: Command Results with context and human readable output
    """"""
    fields_to_search = args.get('fields_to_search', '')
    context = args.get('context', '{}')

    fields_to_search_array = [field.strip() for field in fields_to_search.split(',')]

    if not fields_to_search:
        raise ValueError('fields_to_search not specified')
    if not context:
        raise ValueError('context not specified')

    # Call the standalone function and get the raw response
    result = check_fields(fields_to_search_array, context)
    readable_output = f'Fields {"","".join(fields_to_search_array)} are in given context.' if result \
        else 'There are some fields that not in context.'

    return CommandResults(
        outputs_prefix='CheckIfFieldsExists.FieldsExists',
        outputs_key_field='',
        outputs=result,
        readable_output=readable_output
    )
","def check_fields_command(args: Dict[str, Any]) -> CommandResults:
    """"""
    Args:
        args(dict): args from demisto

    Returns: Command Results with context and human readable output
    """"""
    fields_to_search = args.get(argToList('fields_to_search', ''))
    context = args.get('context', '{}')

    if not fields_to_search:
        raise ValueError('fields_to_search not specified')
    if not context:
        raise ValueError('context not specified')

    # Call the standalone function and get the raw response
    result = check_fields(fields_to_search_array, context)
    readable_output = f'Fields {"","".join(fields_to_search_array)} are in given context.' if result \
        else 'There are some fields that not in context.'

    return CommandResults(
        outputs_prefix='CheckIfFieldsExists.FieldsExists',
        outputs_key_field='',
        outputs=result,
        readable_output=readable_output
    )
"
54295,"def tmu_tilde(mu, data, pdf, init_pars, par_bounds):
    r""""""
    The test statistic, :math:`t_{\mu}`, for establishing an two-sided
    intervals on the strength parameter, :math:`\mu` for models with
    bounded POI.

    Args:
        mu (Number or Tensor): The signal strength parameter
        data (Tensor): The data to be considered
        pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
        init_pars (`list`): Values to initialize the model parameters at for the fit
        par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit

    Returns:
        Float: The calculated test statistic, :math:`q_{\mu}`
    """"""
    if pdf.config.poi_index is None:
        raise UnspecifiedPOI(
            'No POI is defined. A POI is required for profile likelihood based test statistics.'
        )
    if par_bounds[pdf.config.poi_index][0] != 0:
        log.warning(
            'tmu tilde test statistic used for fit configuration with POI not bounded at zero. Use tmu.'
        )
    return _tmu_like(mu, data, pdf, init_pars, par_bounds)
","def tmu_tilde(mu, data, pdf, init_pars, par_bounds):
    r""""""
    The test statistic, :math:`t_{\mu}`, for establishing a two-sided
    interval on the strength parameter, :math:`\mu` for models with
    bounded POI.

    Args:
        mu (Number or Tensor): The signal strength parameter
        data (Tensor): The data to be considered
        pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
        init_pars (`list`): Values to initialize the model parameters at for the fit
        par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit

    Returns:
        Float: The calculated test statistic, :math:`q_{\mu}`
    """"""
    if pdf.config.poi_index is None:
        raise UnspecifiedPOI(
            'No POI is defined. A POI is required for profile likelihood based test statistics.'
        )
    if par_bounds[pdf.config.poi_index][0] != 0:
        log.warning(
            'tmu tilde test statistic used for fit configuration with POI not bounded at zero. Use tmu.'
        )
    return _tmu_like(mu, data, pdf, init_pars, par_bounds)
"
21950,"def _determine_omega_vector(syslist, omega_in, omega_limits, omega_num,
                            Hz=None):
    """"""Determine the frequency range for a frequency-domain plot
    according to a standard logic.

    If omega_in and omega_limits are both None, then omega_out is computed
    on omega_num points according to a default logic defined by
    _default_frequency_range and tailored for the list of systems syslist, and
    omega_range_given is set to False.
    If omega_in is None but omega_limits is an array-like of 2 elements, then
    omega_out is computed with the function np.logspace on omega_num points
    within the interval [min, max] =  [omega_limits[0], omega_limits[1]], and
    omega_range_given is set to True.
    If omega_in is not None, then omega_out is set to omega_in,
    and omega_range_given is set to True

    Parameters
    ----------
    syslist : list of LTI
        List of linear input/output systems (single system is OK)
    omega_in : 1D array_like or None
        Frequency range specified by the user
    omega_limits : 1D array_like or None
        Frequency limits specified by the user
    omega_num : int
        Number of points to be used for the frequency
        range (if the frequency range is not user-specified)
    Hz : bool. optional
        If True, the limits (first and last value) of the frequencies
        are set to full decades in Hz so it fits plotting with logarithmic
        scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.

    Returns
    -------
    omega_out : 1D array
        Frequency range to be used
    omega_range_given : bool
        True if the frequency range was specified by the user, either through
        omega_in or through omega_limits. False if both omega_in
        and omega_limits are None.
    """"""
    omega_range_given = True

    if omega_in is None:
        if omega_limits is None:
            omega_range_given = False
            # Select a default range if none is provided
            omega_out = _default_frequency_range(syslist,
                                                 number_of_samples=omega_num,
                                                 Hz=Hz)
        else:
            omega_limits = np.asarray(omega_limits)
            if len(omega_limits) != 2:
                raise ValueError(""len(omega_limits) must be 2"")
            omega_out = np.logspace(np.log10(omega_limits[0]),
                                    np.log10(omega_limits[1]),
                                    num=omega_num, endpoint=True)
    else:
        omega_out = np.copy(omega_in)
    return omega_out, omega_range_given

","def _determine_omega_vector(syslist, omega_in, omega_limits, omega_num,
                            Hz=None):
    """"""Determine the frequency range for a frequency-domain plot
    according to a standard logic.

    If omega_in and omega_limits are both None, then omega_out is computed
    on omega_num points according to a default logic defined by
    _default_frequency_range and tailored for the list of systems syslist, and
    omega_range_given is set to False.
    If omega_in is None but omega_limits is an array-like of 2 elements, then
    omega_out is computed with the function np.logspace on omega_num points
    within the interval [min, max] =  [omega_limits[0], omega_limits[1]], and
    omega_range_given is set to True.
    If omega_in is not None, then omega_out is set to omega_in,
    and omega_range_given is set to True

    Parameters
    ----------
    syslist : list of LTI
        List of linear input/output systems (single system is OK)
    omega_in : 1D array_like or None
        Frequency range specified by the user
    omega_limits : 1D array_like or None
        Frequency limits specified by the user
    omega_num : int
        Number of points to be used for the frequency
        range (if the frequency range is not user-specified)
    Hz : bool, optional
        If True, the limits (first and last value) of the frequencies
        are set to full decades in Hz so it fits plotting with logarithmic
        scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.

    Returns
    -------
    omega_out : 1D array
        Frequency range to be used
    omega_range_given : bool
        True if the frequency range was specified by the user, either through
        omega_in or through omega_limits. False if both omega_in
        and omega_limits are None.
    """"""
    omega_range_given = True

    if omega_in is None:
        if omega_limits is None:
            omega_range_given = False
            # Select a default range if none is provided
            omega_out = _default_frequency_range(syslist,
                                                 number_of_samples=omega_num,
                                                 Hz=Hz)
        else:
            omega_limits = np.asarray(omega_limits)
            if len(omega_limits) != 2:
                raise ValueError(""len(omega_limits) must be 2"")
            omega_out = np.logspace(np.log10(omega_limits[0]),
                                    np.log10(omega_limits[1]),
                                    num=omega_num, endpoint=True)
    else:
        omega_out = np.copy(omega_in)
    return omega_out, omega_range_given

"
5364,"def test_uptodate_with_changes():
    """"""
    Test pkg.uptodate with simulated changes
    """"""

    pkgs = {
        ""pkga"": {""old"": ""1.0.1"", ""new"": ""2.0.1""},
        ""pkgb"": {""old"": ""1.0.2"", ""new"": ""2.0.2""},
        ""pkgc"": {""old"": ""1.0.3"", ""new"": ""2.0.3""},
    }

    list_upgrades = MagicMock(
        return_value={pkgname: pkgver[""new""] for pkgname, pkgver in pkgs.items()}
    )
    upgrade = MagicMock(return_value=pkgs)
    version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname][""old""])

    with patch.dict(
        pkg.__salt__,
        {
            ""pkg.list_upgrades"": list_upgrades,
            ""pkg.upgrade"": upgrade,
            ""pkg.version"": version,
        },
    ):

        # Run state with test=false
        with patch.dict(pkg.__opts__, {""test"": False}):
            ret = pkg.uptodate(""dummy"", test=True)
            assert ret[""result""]
            assert ret[""changes""] == pkgs

        # Run state with test=true
        with patch.dict(pkg.__opts__, {""test"": True}):
            ret = pkg.uptodate(""dummy"", test=True)
            assert ret[""result""] is None
            assert ret[""changes""] == pkgs

","def test_uptodate_with_changes(pkgs):
    """"""
    Test pkg.uptodate with simulated changes
    """"""

    pkgs = {
        ""pkga"": {""old"": ""1.0.1"", ""new"": ""2.0.1""},
        ""pkgb"": {""old"": ""1.0.2"", ""new"": ""2.0.2""},
        ""pkgc"": {""old"": ""1.0.3"", ""new"": ""2.0.3""},
    }

    list_upgrades = MagicMock(
        return_value={pkgname: pkgver[""new""] for pkgname, pkgver in pkgs.items()}
    )
    upgrade = MagicMock(return_value=pkgs)
    version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname][""old""])

    with patch.dict(
        pkg.__salt__,
        {
            ""pkg.list_upgrades"": list_upgrades,
            ""pkg.upgrade"": upgrade,
            ""pkg.version"": version,
        },
    ):

        # Run state with test=false
        with patch.dict(pkg.__opts__, {""test"": False}):
            ret = pkg.uptodate(""dummy"", test=True)
            assert ret[""result""]
            assert ret[""changes""] == pkgs

        # Run state with test=true
        with patch.dict(pkg.__opts__, {""test"": True}):
            ret = pkg.uptodate(""dummy"", test=True)
            assert ret[""result""] is None
            assert ret[""changes""] == pkgs

"
31094,"def query_logs_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:
    """"""
    Return the result of querying the Logging service
    """"""
    query = args.get('query', '')
    limit = args.get('limit', '')
    transform_results = argToBoolean(args.get('transform_results', 'Yes'))

    if 'limit' not in query.lower():
        query += f' LIMIT {limit}'

    records, raw_results = client.query_loggings(query)

    table_name = get_table_name(query)
    output_results = records if not transform_results else [common_context_transformer(record) for record in records]
    human_readable = tableToMarkdown('Logs ' + table_name + ' table', output_results, removeNull=True)
    ec = {
        'CDL.Logging': output_results
    }
    return human_readable, ec, raw_results

","def query_logs_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:
    """"""
    Return the result of querying the Logging service
    """"""
    query = args.get('query', '')
    limit = args.get('limit', '')
    transform_results = argToBoolean(args.get('transform_results', 'true'))

    if 'limit' not in query.lower():
        query += f' LIMIT {limit}'

    records, raw_results = client.query_loggings(query)

    table_name = get_table_name(query)
    output_results = records if not transform_results else [common_context_transformer(record) for record in records]
    human_readable = tableToMarkdown('Logs ' + table_name + ' table', output_results, removeNull=True)
    ec = {
        'CDL.Logging': output_results
    }
    return human_readable, ec, raw_results

"
41049,"def tedana_workflow(
    data,
    tes,
    out_dir=""."",
    mask=None,
    convention=""bids"",
    prefix="""",
    fittype=""loglin"",
    combmode=""t2s"",
    tedpca=""aic"",
    fixed_seed=42,
    maxit=500,
    maxrestart=10,
    tedort=False,
    gscontrol=None,
    no_reports=False,
    png_cmap=""coolwarm"",
    verbose=False,
    low_mem=False,
    debug=False,
    quiet=False,
    t2smap=None,
    mixm=None,
    ctab=None,
    manacc=None,
):
    """"""
    Run the ""canonical"" TE-Dependent ANAlysis workflow.

    Please remember to cite [1]_.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    out_dir : :obj:`str`, optional
        Output directory.
    mask : :obj:`str` or None, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be
        spatially aligned with `data`. If an explicit mask is not provided,
        then Nilearn's compute_epi_mask function will be used to derive a mask
        from the first echo's data.
    fittype : {'loglin', 'curvefit'}, optional
        Monoexponential fitting method. 'loglin' uses the default linear
        fit to the log of the data. 'curvefit' uses a monoexponential fit to
        the raw data, which is slightly slower but may be more accurate.
        Default is 'loglin'.
    combmode : {'t2s'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default).
    tedpca : {'mdl', 'aic', 'kic', 'kundu', 'kundu-stabilize', float}, optional
        Method with which to select components in TEDPCA.
        If a float is provided, then it is assumed to represent percentage of variance
        explained (0-1) to retain from PCA.
        Default is 'aic'.
    tedort : :obj:`bool`, optional
        Orthogonalize rejected components w.r.t. accepted ones prior to
        denoising. Default is False.
    gscontrol : {None, 'mir', 'gsr'} or :obj:`list`, optional
        Perform additional denoising to remove spatially diffuse noise. Default
        is None.
    verbose : :obj:`bool`, optional
        Generate intermediate and additional files. Default is False.
    no_reports : obj:'bool', optional
        Do not generate .html reports and .png plots. Default is false such
        that reports are generated.
    png_cmap : obj:'str', optional
        Name of a matplotlib colormap to be used when generating figures.
        Cannot be used with --no-png. Default is 'coolwarm'.
    t2smap : :obj:`str`, optional
        Precalculated T2* map in the same space as the input data. Values in
        the map must be in seconds.
    mixm : :obj:`str` or None, optional
        File containing mixing matrix, to be used when re-running the workflow.
        If not provided, ME-PCA and ME-ICA are done. Default is None.
    ctab : :obj:`str` or None, optional
        File containing component table from which to extract pre-computed
        classifications, to be used with 'mixm' when re-running the workflow.
        Default is None.
    manacc : :obj:`list` of :obj:`int` or None, optional
        List of manually accepted components. Can be a list of the components
        numbers or None.
        If provided, this parameter requires ``mixm`` and ``ctab`` to be provided as well.
        Default is None.

    Other Parameters
    ----------------
    fixed_seed : :obj:`int`, optional
        Value passed to ``mdp.numx_rand.seed()``.
        Set to a positive integer value for reproducible ICA results;
        otherwise, set to -1 for varying results across calls.
    maxit : :obj:`int`, optional
        Maximum number of iterations for ICA. Default is 500.
    maxrestart : :obj:`int`, optional
        Maximum number of attempts for ICA. If ICA fails to converge, the
        fixed seed will be updated and ICA will be run again. If convergence
        is achieved before maxrestart attempts, ICA will finish early.
        Default is 10.
    low_mem : :obj:`bool`, optional
        Enables low-memory processing, including the use of IncrementalPCA.
        May increase workflow duration. Default is False.
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppresses logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files. For a complete list of the files
    generated by this workflow, please visit
    https://tedana.readthedocs.io/en/latest/outputs.html

    References
    ----------
    .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L.,
           Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S.,
           Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S.,
           Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M.,
           Whitaker, K., & Handwerker, D. A. (2021).
           TE-dependent analysis of multi-echo fMRI with tedana.
           Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.
    """"""
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    # boilerplate
    basename = ""report""
    extension = ""txt""
    repname = op.join(out_dir, (basename + ""."" + extension))
    repex = op.join(out_dir, (basename + ""*""))
    previousreps = glob(repex)
    previousreps.sort(reverse=True)
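    # rename report files left over from previous runs (adding an '_old' suffix)
    # so they are not overwritten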
    for f in previousreps:
        previousparts = op.splitext(f)
        newname = previousparts[0] + ""_old"" + previousparts[1]
        os.rename(f, newname)
    refname = op.join(out_dir, ""_references.txt"")

    # create logfile name
    basename = ""tedana_""
    extension = ""tsv""
    start_time = datetime.datetime.now().strftime(""%Y-%m-%dT%H%M%S"")
    logname = op.join(out_dir, (basename + start_time + ""."" + extension))
    utils.setup_loggers(logname, repname, refname, quiet=quiet, debug=debug)

    LGR.info(""Using output directory: {}"".format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # Coerce gscontrol to list
    if not isinstance(gscontrol, list):
        gscontrol = [gscontrol]

    # Check value of tedpca *if* it is a predefined string,
    # a float between 0 & 1 or an int >= 1
    tedpca = check_tedpca_value(tedpca, is_parser=False)

    LGR.info(""Loading input data: {}"".format([f for f in data]))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    io_generator = io.OutputGenerator(
        ref_img,
        convention=convention,
        out_dir=out_dir,
        prefix=prefix,
        config=""auto"",
        verbose=verbose,
    )

    n_samp, n_echos, n_vols = catd.shape
    LGR.debug(""Resulting data shape: {}"".format(catd.shape))

    # check if TR is 0
    img_t_r = io_generator.reference_img.header.get_zooms()[-1]
    if img_t_r == 0:
        raise IOError(
            ""Dataset has a TR of 0. This indicates incorrect""
            "" header information. To correct this, we recommend""
            "" using this snippet:""
            ""\n""
            ""https://gist.github.com/jbteves/032c87aeb080dd8de8861cb151bff5d6""
            ""\n""
            ""to correct your TR to the value it should be.""
        )

    if mixm is not None and op.isfile(mixm):
        mixm = op.abspath(mixm)
        # Allow users to re-run on same folder
        mixing_name = io_generator.get_name(""ICA mixing tsv"")
        if mixm != mixing_name:
            shutil.copyfile(mixm, mixing_name)
            shutil.copyfile(mixm, op.join(io_generator.out_dir, op.basename(mixm)))
    elif mixm is not None:
        raise IOError(""Argument 'mixm' must be an existing file."")

    if ctab is not None and op.isfile(ctab):
        ctab = op.abspath(ctab)
        # Allow users to re-run on same folder
        metrics_name = io_generator.get_name(""ICA metrics tsv"")
        if ctab != metrics_name:
            shutil.copyfile(ctab, metrics_name)
            shutil.copyfile(ctab, op.join(io_generator.out_dir, op.basename(ctab)))
    elif ctab is not None:
        raise IOError(""Argument 'ctab' must be an existing file."")

    if ctab and not mixm:
        LGR.warning(""Argument 'ctab' requires argument 'mixm'."")
        ctab = None
    elif manacc is not None and (not mixm or not ctab):
        LGR.warning(""Argument 'manacc' requires arguments 'mixm' and 'ctab'."")
        manacc = None
    elif manacc is not None:
        # coerce to list of integers
        manacc = [int(m) for m in manacc]

    if t2smap is not None and op.isfile(t2smap):
        t2smap_file = io_generator.get_name(""t2star img"")
        t2smap = op.abspath(t2smap)
        # Allow users to re-run on same folder
        if t2smap != t2smap_file:
            shutil.copyfile(t2smap, t2smap_file)
    elif t2smap is not None:
        raise IOError(""Argument 't2smap' must be an existing file."")

    RepLGR.info(
        ""TE-dependence analysis was performed on input data using the tedana workflow ""
        ""(DuPre, Salo et al., 2021).""
    )
    RefLGR.info(
        ""DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L., ""
        ""Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S., ""
        ""Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S., ""
        ""Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M., ""
        ""Whitaker, K., & Handwerker, D. A. (2021). ""
        ""TE-dependent analysis of multi-echo fMRI with tedana. ""
        ""Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.""
    )

    if mask and not t2smap:
        # TODO: add affine check
        LGR.info(""Using user-defined mask"")
        RepLGR.info(""A user-defined mask was applied to the data."")
    elif t2smap and not mask:
        LGR.info(""Using user-defined T2* map to generate mask"")
        t2s_limited_sec = utils.reshape_niimg(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = (t2s_limited != 0).astype(int)
    elif t2smap and mask:
        LGR.info(""Combining user-defined mask and T2* map to generate mask"")
        t2s_limited_sec = utils.reshape_niimg(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = utils.reshape_niimg(mask)
        mask[t2s_limited == 0] = 0  # reduce mask based on T2* map
    else:
        LGR.info(""Computing EPI mask from first echo"")
        first_echo_img = io.new_nii_like(io_generator.reference_img, catd[:, 0, :])
        mask = compute_epi_mask(first_echo_img)
        RepLGR.info(
            ""An initial mask was generated from the first echo using ""
            ""nilearn's compute_epi_mask function.""
        )

    # Create an adaptive mask with at least 1 good echo, for denoising
    mask_denoise, masksum_denoise = utils.make_adaptive_mask(
        catd,
        mask=mask,
        getsum=True,
        threshold=1,
    )
    LGR.debug(""Retaining {}/{} samples for denoising"".format(mask_denoise.sum(), n_samp))
    io_generator.save_file(masksum_denoise, ""adaptive mask img"")

    # Create an adaptive mask with at least 3 good echoes, for classification
    masksum_clf = masksum_denoise.copy()
    masksum_clf[masksum_clf < 3] = 0
    mask_clf = masksum_clf.astype(bool)
    RepLGR.info(
        ""A two-stage masking procedure was applied, in which a liberal mask ""
        ""(including voxels with good data in at least the first echo) was used for ""
        ""optimal combination, T2*/S0 estimation, and denoising, while a more conservative mask ""
        ""(restricted to voxels with good data in at least the first three echoes) was used for ""
        ""the component classification procedure.""
    )
    LGR.debug(""Retaining {}/{} samples for classification"".format(mask_clf.sum(), n_samp))

    if t2smap is None:
        LGR.info(""Computing T2* map"")
        t2s_limited, s0_limited, t2s_full, s0_full = decay.fit_decay(
            catd, tes, mask_denoise, masksum_denoise, fittype
        )

        # set a hard cap for the T2* map
        # anything that is 10x higher than the 99.5 %ile will be reset to 99.5 %ile
        cap_t2s = stats.scoreatpercentile(t2s_full.flatten(), 99.5, interpolation_method=""lower"")
        LGR.debug(""Setting cap on T2* map at {:.5f}s"".format(utils.millisec2sec(cap_t2s)))
        t2s_full[t2s_full > cap_t2s * 10] = cap_t2s
        io_generator.save_file(utils.millisec2sec(t2s_full), ""t2star img"")
        io_generator.save_file(s0_full, ""s0 img"")

        if verbose:
            io_generator.save_file(utils.millisec2sec(t2s_limited), ""limited t2star img"")
            io_generator.save_file(s0_limited, ""limited s0 img"")

    # optimally combine data
    data_oc = combine.make_optcom(catd, tes, masksum_denoise, t2s=t2s_full, combmode=combmode)

    # regress out global signal unless explicitly not desired
    if ""gsr"" in gscontrol:
        catd, data_oc = gsc.gscontrol_raw(catd, data_oc, n_echos, io_generator)

    fout = io_generator.save_file(data_oc, ""combined img"")
    LGR.info(""Writing optimally combined data set: {}"".format(fout))

    if mixm is None:
        # Identify and remove thermal noise from data
        dd, n_components = decomposition.tedpca(
            catd,
            data_oc,
            combmode,
            mask_clf,
            masksum_clf,
            t2s_full,
            io_generator,
            tes=tes,
            algorithm=tedpca,
            kdaw=10.0,
            rdaw=1.0,
            verbose=verbose,
            low_mem=low_mem,
        )
        if verbose:
            io_generator.save_file(utils.unmask(dd, mask_clf), ""whitened img"")

        # Perform ICA, calculate metrics, and apply decision tree
        # Restart when ICA fails to converge or too few BOLD components found
        keep_restarting = True
        n_restarts = 0
        seed = fixed_seed
        while keep_restarting:
            mmix, seed = decomposition.tedica(
                dd, n_components, seed, maxit, maxrestart=(maxrestart - n_restarts)
            )
            seed += 1
            n_restarts = seed - fixed_seed

            # Estimate betas and compute selection metrics for mixing matrix
            # generated from dimensionally reduced data using full data (i.e., data
            # with thermal noise)
            LGR.info(""Making second component selection guess from ICA results"")
            required_metrics = [
                ""kappa"",
                ""rho"",
                ""countnoise"",
                ""countsigFT2"",
                ""countsigFS0"",
                ""dice_FT2"",
                ""dice_FS0"",
                ""signal-noise_t"",
                ""variance explained"",
                ""normalized variance explained"",
                ""d_table_score"",
            ]
            comptable = metrics.collect.generate_metrics(
                catd,
                data_oc,
                mmix,
                masksum_clf,
                tes,
                io_generator,
                ""ICA"",
                metrics=required_metrics,
            )
            comptable, metric_metadata = selection.kundu_selection_v2(comptable, n_echos, n_vols)

            n_bold_comps = comptable[comptable.classification == ""accepted""].shape[0]
            if (n_restarts < maxrestart) and (n_bold_comps == 0):
                LGR.warning(""No BOLD components found. Re-attempting ICA."")
            elif n_bold_comps == 0:
                LGR.warning(""No BOLD components found, but maximum number of restarts reached."")
                keep_restarting = False
            else:
                keep_restarting = False

            RepLGR.disabled = True  # Disable the report to avoid duplicate text
        RepLGR.disabled = False  # Re-enable the report after the while loop is escaped
    else:
        LGR.info(""Using supplied mixing matrix from ICA"")
        mixing_file = io_generator.get_name(""ICA mixing tsv"")
        mmix = pd.read_table(mixing_file).values

        if ctab is None:
            required_metrics = [
                ""kappa"",
                ""rho"",
                ""countnoise"",
                ""countsigFT2"",
                ""countsigFS0"",
                ""dice_FT2"",
                ""dice_FS0"",
                ""signal-noise_t"",
                ""variance explained"",
                ""normalized variance explained"",
                ""d_table_score"",
            ]
            comptable = metrics.collect.generate_metrics(
                catd,
                data_oc,
                mmix,
                masksum_clf,
                tes,
                io_generator,
                ""ICA"",
                metrics=required_metrics,
            )
            comptable, metric_metadata = selection.kundu_selection_v2(comptable, n_echos, n_vols)
        else:
            LGR.info(""Using supplied component table for classification"")
            comptable = pd.read_table(ctab)
            # Change rationale value of rows with NaN to empty strings
            comptable.loc[comptable.rationale.isna(), ""rationale""] = """"

            if manacc is not None:
                comptable, metric_metadata = selection.manual_selection(comptable, acc=manacc)

    # Write out ICA files.
    comp_names = comptable[""Component""].values
    mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
    io_generator.save_file(mixing_df, ""ICA mixing tsv"")
    betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask_denoise), mask_denoise)
    io_generator.save_file(betas_oc, ""z-scored ICA components img"")

    # Save component table and associated json
    io_generator.save_file(comptable, ""ICA metrics tsv"")
    metric_metadata = metrics.collect.get_metadata(comptable)
    io_generator.save_file(metric_metadata, ""ICA metrics json"")

    decomp_metadata = {
        ""Method"": (
            ""Independent components analysis with FastICA algorithm implemented by sklearn. ""
        ),
    }
    for comp_name in comp_names:
        decomp_metadata[comp_name] = {
            ""Description"": ""ICA fit to dimensionally-reduced optimally combined data."",
            ""Method"": ""tedana"",
        }
    with open(io_generator.get_name(""ICA decomposition json""), ""w"") as fo:
        json.dump(decomp_metadata, fo, sort_keys=True, indent=4)

    if comptable[comptable.classification == ""accepted""].shape[0] == 0:
        LGR.warning(""No BOLD components detected! Please check data and results!"")

    mmix_orig = mmix.copy()
    if tedort:
        acc_idx = comptable.loc[~comptable.classification.str.contains(""rejected"")].index.values
        rej_idx = comptable.loc[comptable.classification.str.contains(""rejected"")].index.values
        acc_ts = mmix[:, acc_idx]
        rej_ts = mmix[:, rej_idx]
        betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0]
        pred_rej_ts = np.dot(acc_ts, betas)
        resid = rej_ts - pred_rej_ts
        mmix[:, rej_idx] = resid
        comp_names = [
            io.add_decomp_prefix(comp, prefix=""ica"", max_value=comptable.index.max())
            for comp in comptable.index.values
        ]
        mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
        io_generator.save_file(mixing_df, ""ICA orthogonalized mixing tsv"")
        RepLGR.info(
            ""Rejected components' time series were then ""
            ""orthogonalized with respect to accepted components' time ""
            ""series.""
        )

    io.writeresults(
        data_oc,
        mask=mask_denoise,
        comptable=comptable,
        mmix=mmix,
        n_vols=n_vols,
        io_generator=io_generator,
    )

    if ""mir"" in gscontrol:
        gsc.minimum_image_regression(data_oc, mmix, mask_denoise, comptable, io_generator)

    if verbose:
        io.writeresults_echoes(catd, mmix, mask_denoise, comptable, io_generator)

    # Write out BIDS-compatible description file
    derivative_metadata = {
        ""Name"": ""tedana Outputs"",
        ""BIDSVersion"": ""1.5.0"",
        ""DatasetType"": ""derivative"",
        ""GeneratedBy"": [
            {
                ""Name"": ""tedana"",
                ""Version"": __version__,
                ""Description"": (
                    ""A denoising pipeline for the identification and removal ""
                    ""of non-BOLD noise from multi-echo fMRI data.""
                ),
                ""CodeURL"": ""https://github.com/ME-ICA/tedana"",
            }
        ],
    }
    with open(io_generator.get_name(""data description json""), ""w"") as fo:
        json.dump(derivative_metadata, fo, sort_keys=True, indent=4)

    RepLGR.info(
        ""This workflow used numpy (Van Der Walt, Colbert, & ""
        ""Varoquaux, 2011), scipy (Jones et al., 2001), pandas ""
        ""(McKinney, 2010), scikit-learn (Pedregosa et al., 2011), ""
        ""nilearn, and nibabel (Brett et al., 2019).""
    )
    RefLGR.info(
        ""Van Der Walt, S., Colbert, S. C., & Varoquaux, G. (2011). The ""
        ""NumPy array: a structure for efficient numerical computation. ""
        ""Computing in Science & Engineering, 13(2), 22.""
    )
    RefLGR.info(
        ""Jones E, Oliphant E, Peterson P, et al. SciPy: Open Source ""
        ""Scientific Tools for Python, 2001-, http://www.scipy.org/""
    )
    RefLGR.info(
        ""McKinney, W. (2010, June). Data structures for statistical ""
        ""computing in python. In Proceedings of the 9th Python in ""
        ""Science Conference (Vol. 445, pp. 51-56).""
    )
    RefLGR.info(
        ""Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., ""
        ""Thirion, B., Grisel, O., ... & Vanderplas, J. (2011). ""
        ""Scikit-learn: Machine learning in Python. Journal of machine ""
        ""learning research, 12(Oct), 2825-2830.""
    )
    RefLGR.info(
        ""Brett, M., Markiewicz, C. J., Hanke, M., Côté, M.-A., ""
        ""Cipollini, B., McCarthy, P., … freec84. (2019, May 28). ""
        ""nipy/nibabel. Zenodo. http://doi.org/10.5281/zenodo.3233118""
    )

    RepLGR.info(
        ""This workflow also used the Dice similarity index "" ""(Dice, 1945; Sørensen, 1948).""
    )
    RefLGR.info(
        ""Dice, L. R. (1945). Measures of the amount of ecologic ""
        ""association between species. Ecology, 26(3), 297-302.""
    )
    RefLGR.info(
        ""Sørensen, T. J. (1948). A method of establishing groups of ""
        ""equal amplitude in plant sociology based on similarity of ""
        ""species content and its application to analyses of the ""
        ""vegetation on Danish commons. I kommission hos E. Munksgaard.""
    )

    with open(repname, ""r"") as fo:
        report = [line.rstrip() for line in fo.readlines()]
        report = "" "".join(report)
    with open(refname, ""r"") as fo:
        reference_list = sorted(list(set(fo.readlines())))
        references = ""\n"".join(reference_list)
    report += ""\n\nReferences:\n\n"" + references
    with open(repname, ""w"") as fo:
        fo.write(report)

    if not no_reports:
        LGR.info(""Making figures folder with static component maps and timecourse plots."")

        dn_ts, hikts, lowkts = io.denoise_ts(data_oc, mmix, mask_denoise, comptable)

        reporting.static_figures.carpet_plot(
            optcom_ts=data_oc,
            denoised_ts=dn_ts,
            hikts=hikts,
            lowkts=lowkts,
            mask=mask_denoise,
            io_generator=io_generator,
            gscontrol=gscontrol,
        )
        reporting.static_figures.comp_figures(
            data_oc,
            mask=mask_denoise,
            comptable=comptable,
            mmix=mmix_orig,
            io_generator=io_generator,
            png_cmap=png_cmap,
        )

        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            warn_msg = (
                ""Reports requested but Python version is less than ""
                ""3.6.0. Dynamic reports will not be generated.""
            )
            LGR.warn(warn_msg)
        else:
            LGR.info(""Generating dynamic report"")
            reporting.generate_report(io_generator, tr=img_t_r)

    LGR.info(""Workflow completed"")
    utils.teardown_loggers()
    os.remove(refname)

","def tedana_workflow(
    data,
    tes,
    out_dir=""."",
    mask=None,
    convention=""bids"",
    prefix="""",
    fittype=""loglin"",
    combmode=""t2s"",
    tedpca=""aic"",
    fixed_seed=42,
    maxit=500,
    maxrestart=10,
    tedort=False,
    gscontrol=None,
    no_reports=False,
    png_cmap=""coolwarm"",
    verbose=False,
    low_mem=False,
    debug=False,
    quiet=False,
    t2smap=None,
    mixm=None,
    ctab=None,
    manacc=None,
):
    """"""
    Run the ""canonical"" TE-Dependent ANAlysis workflow.

    Please remember to cite [1]_.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    out_dir : :obj:`str`, optional
        Output directory.
    mask : :obj:`str` or None, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be
        spatially aligned with `data`. If an explicit mask is not provided,
        then Nilearn's compute_epi_mask function will be used to derive a mask
        from the first echo's data.
    fittype : {'loglin', 'curvefit'}, optional
        Monoexponential fitting method. 'loglin' uses the the default linear
        fit to the log of the data. 'curvefit' uses a monoexponential fit to
        the raw data, which is slightly slower but may be more accurate.
        Default is 'loglin'.
    combmode : {'t2s'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default).
    tedpca : {'mdl', 'aic', 'kic', 'kundu', 'kundu-stabilize', float}, optional
        Method with which to select components in TEDPCA.
        If a float is provided, then it is assumed to represent percentage of variance
        explained (0-1) to retain from PCA.
        Default is 'aic'.
    tedort : :obj:`bool`, optional
        Orthogonalize rejected components w.r.t. accepted ones prior to
        denoising. Default is False.
    gscontrol : {None, 'mir', 'gsr'} or :obj:`list`, optional
        Perform additional denoising to remove spatially diffuse noise. Default
        is None.
    verbose : :obj:`bool`, optional
        Generate intermediate and additional files. Default is False.
    no_reports : obj:'bool', optional
        Do not generate .html reports and .png plots. Default is false such
        that reports are generated.
    png_cmap : obj:'str', optional
        Name of a matplotlib colormap to be used when generating figures.
        Cannot be used with --no-png. Default is 'coolwarm'.
    t2smap : :obj:`str`, optional
        Precalculated T2* map in the same space as the input data. Values in
        the map must be in seconds.
    mixm : :obj:`str` or None, optional
        File containing mixing matrix, to be used when re-running the workflow.
        If not provided, ME-PCA and ME-ICA are done. Default is None.
    ctab : :obj:`str` or None, optional
        File containing component table from which to extract pre-computed
        classifications, to be used with 'mixm' when re-running the workflow.
        Default is None.
    manacc : :obj:`list` of :obj:`int` or None, optional
        List of manually accepted components. Can be a list of the component
        numbers or None.
        If provided, this parameter requires ``mixm`` and ``ctab`` to be provided as well.
        Default is None.

    Other Parameters
    ----------------
    fixed_seed : :obj:`int`, optional
        Value passed to ``mdp.numx_rand.seed()``.
        Set to a positive integer value for reproducible ICA results;
        otherwise, set to -1 for varying results across calls.
    maxit : :obj:`int`, optional
        Maximum number of iterations for ICA. Default is 500.
    maxrestart : :obj:`int`, optional
        Maximum number of attempts for ICA. If ICA fails to converge, the
        fixed seed will be updated and ICA will be run again. If convergence
        is achieved before maxrestart attempts, ICA will finish early.
        Default is 10.
    low_mem : :obj:`bool`, optional
        Enables low-memory processing, including the use of IncrementalPCA.
        May increase workflow duration. Default is False.
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppresses logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files. For a complete list of the files
    generated by this workflow, please visit
    https://tedana.readthedocs.io/en/latest/outputs.html

    References
    ----------
    .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L.,
           Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S.,
           Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S.,
           Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M.,
           Whitaker, K., & Handwerker, D. A. (2021).
           TE-dependent analysis of multi-echo fMRI with tedana.
           Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.
    """"""
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    # boilerplate
    basename = ""report""
    extension = ""txt""
    repname = op.join(out_dir, (basename + ""."" + extension))
    repex = op.join(out_dir, (basename + ""*""))
    previousreps = glob(repex)
    previousreps.sort(reverse=True)
    for f in previousreps:
        previousparts = op.splitext(f)
        newname = previousparts[0] + ""_old"" + previousparts[1]
        os.rename(f, newname)
    refname = op.join(out_dir, ""_references.txt"")

    # create logfile name
    basename = ""tedana_""
    extension = ""tsv""
    start_time = datetime.datetime.now().strftime(""%Y-%m-%dT%H%M%S"")
    logname = op.join(out_dir, (basename + start_time + ""."" + extension))
    utils.setup_loggers(logname, repname, refname, quiet=quiet, debug=debug)

    LGR.info(""Using output directory: {}"".format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # Coerce gscontrol to list
    if not isinstance(gscontrol, list):
        gscontrol = [gscontrol]

    # Check value of tedpca *if* it is a predefined string,
    # a float on [0, 1] or an int >= 1
    tedpca = check_tedpca_value(tedpca, is_parser=False)

    LGR.info(""Loading input data: {}"".format([f for f in data]))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    io_generator = io.OutputGenerator(
        ref_img,
        convention=convention,
        out_dir=out_dir,
        prefix=prefix,
        config=""auto"",
        verbose=verbose,
    )

    n_samp, n_echos, n_vols = catd.shape
    LGR.debug(""Resulting data shape: {}"".format(catd.shape))

    # check if TR is 0
    img_t_r = io_generator.reference_img.header.get_zooms()[-1]
    if img_t_r == 0:
        raise IOError(
            ""Dataset has a TR of 0. This indicates incorrect""
            "" header information. To correct this, we recommend""
            "" using this snippet:""
            ""\n""
            ""https://gist.github.com/jbteves/032c87aeb080dd8de8861cb151bff5d6""
            ""\n""
            ""to correct your TR to the value it should be.""
        )

    if mixm is not None and op.isfile(mixm):
        mixm = op.abspath(mixm)
        # Allow users to re-run on same folder
        mixing_name = io_generator.get_name(""ICA mixing tsv"")
        if mixm != mixing_name:
            shutil.copyfile(mixm, mixing_name)
            shutil.copyfile(mixm, op.join(io_generator.out_dir, op.basename(mixm)))
    elif mixm is not None:
        raise IOError(""Argument 'mixm' must be an existing file."")

    if ctab is not None and op.isfile(ctab):
        ctab = op.abspath(ctab)
        # Allow users to re-run on same folder
        metrics_name = io_generator.get_name(""ICA metrics tsv"")
        if ctab != metrics_name:
            shutil.copyfile(ctab, metrics_name)
            shutil.copyfile(ctab, op.join(io_generator.out_dir, op.basename(ctab)))
    elif ctab is not None:
        raise IOError(""Argument 'ctab' must be an existing file."")

    if ctab and not mixm:
        LGR.warning(""Argument 'ctab' requires argument 'mixm'."")
        ctab = None
    elif manacc is not None and (not mixm or not ctab):
        LGR.warning(""Argument 'manacc' requires arguments 'mixm' and 'ctab'."")
        manacc = None
    elif manacc is not None:
        # coerce to list of integers
        manacc = [int(m) for m in manacc]

    if t2smap is not None and op.isfile(t2smap):
        t2smap_file = io_generator.get_name(""t2star img"")
        t2smap = op.abspath(t2smap)
        # Allow users to re-run on same folder
        if t2smap != t2smap_file:
            shutil.copyfile(t2smap, t2smap_file)
    elif t2smap is not None:
        raise IOError(""Argument 't2smap' must be an existing file."")

    RepLGR.info(
        ""TE-dependence analysis was performed on input data using the tedana workflow ""
        ""(DuPre, Salo et al., 2021).""
    )
    RefLGR.info(
        ""DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L., ""
        ""Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S., ""
        ""Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S., ""
        ""Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M., ""
        ""Whitaker, K., & Handwerker, D. A. (2021). ""
        ""TE-dependent analysis of multi-echo fMRI with tedana. ""
        ""Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.""
    )

    if mask and not t2smap:
        # TODO: add affine check
        LGR.info(""Using user-defined mask"")
        RepLGR.info(""A user-defined mask was applied to the data."")
    elif t2smap and not mask:
        LGR.info(""Using user-defined T2* map to generate mask"")
        t2s_limited_sec = utils.reshape_niimg(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = (t2s_limited != 0).astype(int)
    elif t2smap and mask:
        LGR.info(""Combining user-defined mask and T2* map to generate mask"")
        t2s_limited_sec = utils.reshape_niimg(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = utils.reshape_niimg(mask)
        mask[t2s_limited == 0] = 0  # reduce mask based on T2* map
    else:
        LGR.info(""Computing EPI mask from first echo"")
        first_echo_img = io.new_nii_like(io_generator.reference_img, catd[:, 0, :])
        mask = compute_epi_mask(first_echo_img)
        RepLGR.info(
            ""An initial mask was generated from the first echo using ""
            ""nilearn's compute_epi_mask function.""
        )

    # Create an adaptive mask with at least 1 good echo, for denoising
    mask_denoise, masksum_denoise = utils.make_adaptive_mask(
        catd,
        mask=mask,
        getsum=True,
        threshold=1,
    )
    LGR.debug(""Retaining {}/{} samples for denoising"".format(mask_denoise.sum(), n_samp))
    io_generator.save_file(masksum_denoise, ""adaptive mask img"")

    # Create an adaptive mask with at least 3 good echoes, for classification
    masksum_clf = masksum_denoise.copy()
    masksum_clf[masksum_clf < 3] = 0
    mask_clf = masksum_clf.astype(bool)
    RepLGR.info(
        ""A two-stage masking procedure was applied, in which a liberal mask ""
        ""(including voxels with good data in at least the first echo) was used for ""
        ""optimal combination, T2*/S0 estimation, and denoising, while a more conservative mask ""
        ""(restricted to voxels with good data in at least the first three echoes) was used for ""
        ""the component classification procedure.""
    )
    LGR.debug(""Retaining {}/{} samples for classification"".format(mask_clf.sum(), n_samp))

    if t2smap is None:
        LGR.info(""Computing T2* map"")
        t2s_limited, s0_limited, t2s_full, s0_full = decay.fit_decay(
            catd, tes, mask_denoise, masksum_denoise, fittype
        )

        # set a hard cap for the T2* map
        # anything that is 10x higher than the 99.5 %ile will be reset to 99.5 %ile
        cap_t2s = stats.scoreatpercentile(t2s_full.flatten(), 99.5, interpolation_method=""lower"")
        LGR.debug(""Setting cap on T2* map at {:.5f}s"".format(utils.millisec2sec(cap_t2s)))
        t2s_full[t2s_full > cap_t2s * 10] = cap_t2s
        io_generator.save_file(utils.millisec2sec(t2s_full), ""t2star img"")
        io_generator.save_file(s0_full, ""s0 img"")

        if verbose:
            io_generator.save_file(utils.millisec2sec(t2s_limited), ""limited t2star img"")
            io_generator.save_file(s0_limited, ""limited s0 img"")

    # optimally combine data
    data_oc = combine.make_optcom(catd, tes, masksum_denoise, t2s=t2s_full, combmode=combmode)

    # regress out global signal unless explicitly not desired
    if ""gsr"" in gscontrol:
        catd, data_oc = gsc.gscontrol_raw(catd, data_oc, n_echos, io_generator)

    fout = io_generator.save_file(data_oc, ""combined img"")
    LGR.info(""Writing optimally combined data set: {}"".format(fout))

    if mixm is None:
        # Identify and remove thermal noise from data
        dd, n_components = decomposition.tedpca(
            catd,
            data_oc,
            combmode,
            mask_clf,
            masksum_clf,
            t2s_full,
            io_generator,
            tes=tes,
            algorithm=tedpca,
            kdaw=10.0,
            rdaw=1.0,
            verbose=verbose,
            low_mem=low_mem,
        )
        if verbose:
            io_generator.save_file(utils.unmask(dd, mask_clf), ""whitened img"")

        # Perform ICA, calculate metrics, and apply decision tree
        # Restart when ICA fails to converge or too few BOLD components found
        keep_restarting = True
        n_restarts = 0
        seed = fixed_seed
        while keep_restarting:
            mmix, seed = decomposition.tedica(
                dd, n_components, seed, maxit, maxrestart=(maxrestart - n_restarts)
            )
            seed += 1
            n_restarts = seed - fixed_seed

            # Estimate betas and compute selection metrics for mixing matrix
            # generated from dimensionally reduced data using full data (i.e., data
            # with thermal noise)
            LGR.info(""Making second component selection guess from ICA results"")
            required_metrics = [
                ""kappa"",
                ""rho"",
                ""countnoise"",
                ""countsigFT2"",
                ""countsigFS0"",
                ""dice_FT2"",
                ""dice_FS0"",
                ""signal-noise_t"",
                ""variance explained"",
                ""normalized variance explained"",
                ""d_table_score"",
            ]
            comptable = metrics.collect.generate_metrics(
                catd,
                data_oc,
                mmix,
                masksum_clf,
                tes,
                io_generator,
                ""ICA"",
                metrics=required_metrics,
            )
            comptable, metric_metadata = selection.kundu_selection_v2(comptable, n_echos, n_vols)

            n_bold_comps = comptable[comptable.classification == ""accepted""].shape[0]
            if (n_restarts < maxrestart) and (n_bold_comps == 0):
                LGR.warning(""No BOLD components found. Re-attempting ICA."")
            elif n_bold_comps == 0:
                LGR.warning(""No BOLD components found, but maximum number of restarts reached."")
                keep_restarting = False
            else:
                keep_restarting = False

            RepLGR.disabled = True  # Disable the report to avoid duplicate text
        RepLGR.disabled = False  # Re-enable the report after the while loop is escaped
    else:
        LGR.info(""Using supplied mixing matrix from ICA"")
        mixing_file = io_generator.get_name(""ICA mixing tsv"")
        mmix = pd.read_table(mixing_file).values

        if ctab is None:
            required_metrics = [
                ""kappa"",
                ""rho"",
                ""countnoise"",
                ""countsigFT2"",
                ""countsigFS0"",
                ""dice_FT2"",
                ""dice_FS0"",
                ""signal-noise_t"",
                ""variance explained"",
                ""normalized variance explained"",
                ""d_table_score"",
            ]
            comptable = metrics.collect.generate_metrics(
                catd,
                data_oc,
                mmix,
                masksum_clf,
                tes,
                io_generator,
                ""ICA"",
                metrics=required_metrics,
            )
            comptable, metric_metadata = selection.kundu_selection_v2(comptable, n_echos, n_vols)
        else:
            LGR.info(""Using supplied component table for classification"")
            comptable = pd.read_table(ctab)
            # Change rationale value of rows with NaN to empty strings
            comptable.loc[comptable.rationale.isna(), ""rationale""] = """"

            if manacc is not None:
                comptable, metric_metadata = selection.manual_selection(comptable, acc=manacc)

    # Write out ICA files.
    comp_names = comptable[""Component""].values
    mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
    io_generator.save_file(mixing_df, ""ICA mixing tsv"")
    betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask_denoise), mask_denoise)
    io_generator.save_file(betas_oc, ""z-scored ICA components img"")

    # Save component table and associated json
    io_generator.save_file(comptable, ""ICA metrics tsv"")
    metric_metadata = metrics.collect.get_metadata(comptable)
    io_generator.save_file(metric_metadata, ""ICA metrics json"")

    decomp_metadata = {
        ""Method"": (
            ""Independent components analysis with FastICA algorithm implemented by sklearn. ""
        ),
    }
    for comp_name in comp_names:
        decomp_metadata[comp_name] = {
            ""Description"": ""ICA fit to dimensionally-reduced optimally combined data."",
            ""Method"": ""tedana"",
        }
    with open(io_generator.get_name(""ICA decomposition json""), ""w"") as fo:
        json.dump(decomp_metadata, fo, sort_keys=True, indent=4)

    if comptable[comptable.classification == ""accepted""].shape[0] == 0:
        LGR.warning(""No BOLD components detected! Please check data and results!"")

    mmix_orig = mmix.copy()
    if tedort:
        acc_idx = comptable.loc[~comptable.classification.str.contains(""rejected"")].index.values
        rej_idx = comptable.loc[comptable.classification.str.contains(""rejected"")].index.values
        acc_ts = mmix[:, acc_idx]
        rej_ts = mmix[:, rej_idx]
        betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0]
        pred_rej_ts = np.dot(acc_ts, betas)
        resid = rej_ts - pred_rej_ts
        mmix[:, rej_idx] = resid
        comp_names = [
            io.add_decomp_prefix(comp, prefix=""ica"", max_value=comptable.index.max())
            for comp in comptable.index.values
        ]
        mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
        io_generator.save_file(mixing_df, ""ICA orthogonalized mixing tsv"")
        RepLGR.info(
            ""Rejected components' time series were then ""
            ""orthogonalized with respect to accepted components' time ""
            ""series.""
        )

    io.writeresults(
        data_oc,
        mask=mask_denoise,
        comptable=comptable,
        mmix=mmix,
        n_vols=n_vols,
        io_generator=io_generator,
    )

    if ""mir"" in gscontrol:
        gsc.minimum_image_regression(data_oc, mmix, mask_denoise, comptable, io_generator)

    if verbose:
        io.writeresults_echoes(catd, mmix, mask_denoise, comptable, io_generator)

    # Write out BIDS-compatible description file
    derivative_metadata = {
        ""Name"": ""tedana Outputs"",
        ""BIDSVersion"": ""1.5.0"",
        ""DatasetType"": ""derivative"",
        ""GeneratedBy"": [
            {
                ""Name"": ""tedana"",
                ""Version"": __version__,
                ""Description"": (
                    ""A denoising pipeline for the identification and removal ""
                    ""of non-BOLD noise from multi-echo fMRI data.""
                ),
                ""CodeURL"": ""https://github.com/ME-ICA/tedana"",
            }
        ],
    }
    with open(io_generator.get_name(""data description json""), ""w"") as fo:
        json.dump(derivative_metadata, fo, sort_keys=True, indent=4)

    RepLGR.info(
        ""This workflow used numpy (Van Der Walt, Colbert, & ""
        ""Varoquaux, 2011), scipy (Jones et al., 2001), pandas ""
        ""(McKinney, 2010), scikit-learn (Pedregosa et al., 2011), ""
        ""nilearn, and nibabel (Brett et al., 2019).""
    )
    RefLGR.info(
        ""Van Der Walt, S., Colbert, S. C., & Varoquaux, G. (2011). The ""
        ""NumPy array: a structure for efficient numerical computation. ""
        ""Computing in Science & Engineering, 13(2), 22.""
    )
    RefLGR.info(
        ""Jones E, Oliphant E, Peterson P, et al. SciPy: Open Source ""
        ""Scientific Tools for Python, 2001-, http://www.scipy.org/""
    )
    RefLGR.info(
        ""McKinney, W. (2010, June). Data structures for statistical ""
        ""computing in python. In Proceedings of the 9th Python in ""
        ""Science Conference (Vol. 445, pp. 51-56).""
    )
    RefLGR.info(
        ""Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., ""
        ""Thirion, B., Grisel, O., ... & Vanderplas, J. (2011). ""
        ""Scikit-learn: Machine learning in Python. Journal of machine ""
        ""learning research, 12(Oct), 2825-2830.""
    )
    RefLGR.info(
        ""Brett, M., Markiewicz, C. J., Hanke, M., Côté, M.-A., ""
        ""Cipollini, B., McCarthy, P., … freec84. (2019, May 28). ""
        ""nipy/nibabel. Zenodo. http://doi.org/10.5281/zenodo.3233118""
    )

    RepLGR.info(
        ""This workflow also used the Dice similarity index "" ""(Dice, 1945; Sørensen, 1948).""
    )
    RefLGR.info(
        ""Dice, L. R. (1945). Measures of the amount of ecologic ""
        ""association between species. Ecology, 26(3), 297-302.""
    )
    RefLGR.info(
        ""Sørensen, T. J. (1948). A method of establishing groups of ""
        ""equal amplitude in plant sociology based on similarity of ""
        ""species content and its application to analyses of the ""
        ""vegetation on Danish commons. I kommission hos E. Munksgaard.""
    )

    with open(repname, ""r"") as fo:
        report = [line.rstrip() for line in fo.readlines()]
        report = "" "".join(report)
    with open(refname, ""r"") as fo:
        reference_list = sorted(list(set(fo.readlines())))
        references = ""\n"".join(reference_list)
    report += ""\n\nReferences:\n\n"" + references
    with open(repname, ""w"") as fo:
        fo.write(report)

    if not no_reports:
        LGR.info(""Making figures folder with static component maps and timecourse plots."")

        dn_ts, hikts, lowkts = io.denoise_ts(data_oc, mmix, mask_denoise, comptable)

        reporting.static_figures.carpet_plot(
            optcom_ts=data_oc,
            denoised_ts=dn_ts,
            hikts=hikts,
            lowkts=lowkts,
            mask=mask_denoise,
            io_generator=io_generator,
            gscontrol=gscontrol,
        )
        reporting.static_figures.comp_figures(
            data_oc,
            mask=mask_denoise,
            comptable=comptable,
            mmix=mmix_orig,
            io_generator=io_generator,
            png_cmap=png_cmap,
        )

        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            warn_msg = (
                ""Reports requested but Python version is less than ""
                ""3.6.0. Dynamic reports will not be generated.""
            )
            LGR.warn(warn_msg)
        else:
            LGR.info(""Generating dynamic report"")
            reporting.generate_report(io_generator, tr=img_t_r)

    LGR.info(""Workflow completed"")
    utils.teardown_loggers()
    os.remove(refname)

"
4738,"def get_cmap(name=None, lut=None):
    """"""
    Get a colormap instance, defaulting to rc values if *name* is None.

    Colormaps added with :func:`register_cmap` take precedence over
    built-in colormaps.

    Parameters
    ----------
    name : `matplotlib.colors.Colormap` or str or None, default: None
        If a `Colormap` instance, it will be returned.  Otherwise, the name of
        a colormap known to Matplotlib, which will be resampled by *lut*.  The
        default, None, means :rc:`image.cmap`.
    lut : int or None, default: None
        If *name* is a string or None, and *lut* is not None, the colormap will
        be resampled to have *lut* entries in the lookup table.
    """"""
    if name is None:
        name = mpl.rcParams['image.cmap']
    if isinstance(name, colors.Colormap):
        return name
    cbook._check_in_list(sorted(cmap_d), name=name)
    if lut is None:
        return cmap_d[name]
    else:
        return cmap_d[name]._resample(lut)

","def get_cmap(name=None, lut=None):
    """"""
    Get a colormap instance, defaulting to rc values if *name* is None.

    Colormaps added with :func:`register_cmap` take precedence over
    built-in colormaps.

    Parameters
    ----------
    name : `matplotlib.colors.Colormap` or str or None, default: None
        If a `Colormap` instance, it will be returned.  Otherwise, the name of
        a colormap known to Matplotlib, which will be resampled by *lut*.  The
        default, None, means :rc:`image.cmap`.
    lut : int or None, default: None
        If *name* is not a Colormap instance, and *lut* is not None, the created colormap will
        be resampled to have *lut* entries in the lookup table.
    """"""
    if name is None:
        name = mpl.rcParams['image.cmap']
    if isinstance(name, colors.Colormap):
        return name
    cbook._check_in_list(sorted(cmap_d), name=name)
    if lut is None:
        return cmap_d[name]
    else:
        return cmap_d[name]._resample(lut)

"
25768,"def run_and_read_cplex(n, problem_fn, solution_fn, solver_logfile,
                        solver_options, warmstart=None, store_basis=True):
    """"""
    Solving function. Reads the linear problem file and passes it to the cplex
    solver. If the solution is successful, it returns variable solutions and
    constraint dual values. Cplex must be installed to use this function.

    """"""
    import cplex
    m = cplex.Cplex()
    out = m.set_log_stream(solver_logfile)
    if solver_options is not None:
        for key, value in solver_options.items():
            getattr(m.parameters, key).set(value)
    m.read(problem_fn)
    if warmstart:
        m.start.read_basis(warmstart)
    m.solve()
    is_lp = m.problem_type[m.get_problem_type()] == 'LP'

    termination_condition = m.solution.get_status_string()
    if 'optimal' in termination_condition:
        status = 'ok'
        termination_condition = 'optimal'
    else:
        status = 'warning'

    if (status == 'ok') and store_basis and is_lp:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        m.solution.basis.write(n.basis_fn)

    objective = m.solution.get_objective_value()
    sol = pd.Series(m.solution.get_values(), m.variables.get_names())
    if is_lp:
        dual = pd.Series(m.solution.get_dual_values(),
                         m.linear_constraints.get_names())
    else:
        logger.warning(""Shadow prices of MILP couldn't be parsed"")
        dual = pd.Series(index=m.linear_constraints.get_names())
    return (status, termination_condition, sol, dual, objective)

","def run_and_read_cplex(n, problem_fn, solution_fn, solver_logfile,
                        solver_options, warmstart=None, store_basis=True):
    """"""
    Solving function. Reads the linear problem file and passes it to the cplex
    solver. If the solution is successful, it returns variable solutions and
    constraint dual values. Cplex must be installed to use this function.

    """"""
    import cplex
    m = cplex.Cplex()
    out = m.set_log_stream(solver_logfile)
    if solver_options is not None:
        for key, value in solver_options.items():
            getattr(m.parameters, key).set(value)
    m.read(problem_fn)
    if warmstart:
        m.start.read_basis(warmstart)
    m.solve()
    is_lp = m.problem_type[m.get_problem_type()] == 'LP'

    termination_condition = m.solution.get_status_string()
    if 'optimal' in termination_condition:
        status = 'ok'
        termination_condition = 'optimal'
    else:
        status = 'warning'

    if (status == 'ok') and store_basis and is_lp:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        m.solution.basis.write(n.basis_fn)

    objective = m.solution.get_objective_value()
    sol = pd.Series(m.solution.get_values(), m.variables.get_names())
    if is_lp:
        dual = pd.Series(m.solution.get_dual_values(),
                         m.linear_constraints.get_names())
    else:
        logger.warning(""Shadow prices of MILP couldn't be parsed"")
        dual = pd.Series(index=m.linear_constraints.get_names()).pipe(set_int_index)
    return (status, termination_condition, sol, dual, objective)

"
58525,"def reduce_multigpu(tensor_list: list,
                    dst_rank: int = 0,
                    dst_tensor: int = 0,
                    group_name: str = ""default"",
                    op=types.ReduceOp.SUM):
    """"""Reduce the tensor across the group to the destination rank
    and destination tensor.

    Args:
        tensor_list: the list of tensors to be reduced on this process;
                     each tensor located on a GPU.
        dst_rank: the rank of the destination process.
        dst_tensor: the index of GPU at the destination.
        group_name: the collective group name to perform reduce.
        op: The reduce operation.

    Returns:
        None
    """"""
    if not types.cupy_available():
        raise RuntimeError(""Multigpu calls requires NCCL and Cupy."")
    _check_tensor_list_input(tensor_list)
    g = _check_and_get_group(group_name)

    # check dst rank
    _check_rank_valid(g, dst_rank)
    _check_root_tensor_valid(len(tensor_list), dst_tensor)
    opts = types.ReduceOptions()
    opts.reduceOp = op
    opts.root_rank = dst_rank
    opts.root_tensor = dst_tensor
    g.reduce(tensor_list, opts)

","def reduce_multigpu(tensor_list: list,
                    dst_rank: int = 0,
                    dst_tensor: int = 0,
                    group_name: str = ""default"",
                    op=types.ReduceOp.SUM):
    """"""Reduce the tensor across the group to the destination rank
    and destination tensor.

    Args:
        tensor_list: the list of tensors to be reduced on this process;
             each tensor located on a GPU.
        dst_rank: the rank of the destination process.
        dst_tensor: the index of GPU at the destination.
        group_name: the collective group name to perform reduce.
        op: The reduce operation.

    Returns:
        None
    """"""
    if not types.cupy_available():
        raise RuntimeError(""Multigpu calls requires NCCL and Cupy."")
    _check_tensor_list_input(tensor_list)
    g = _check_and_get_group(group_name)

    # check dst rank
    _check_rank_valid(g, dst_rank)
    _check_root_tensor_valid(len(tensor_list), dst_tensor)
    opts = types.ReduceOptions()
    opts.reduceOp = op
    opts.root_rank = dst_rank
    opts.root_tensor = dst_tensor
    g.reduce(tensor_list, opts)

"
32449,"def main() -> None:  # pragma: no cover
    params = demisto.params()

    url = params.get('url')
    api_version = params.get('api_version')
    token = demisto.params().get('credentials', {}).get('password')
    base_url = urljoin(url, f'/api/{api_version}/')
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    first_fetch = params.get('first_fetch')
    max_fetch = params.get('max_fetch')
    vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')

    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        client = Client(base_url, token, api_version, verify_certificate, proxy)

        last_run = demisto.getLastRun()
        if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
                and 'network' not in last_run:
            last_run = arg_to_seconds_timestamp(first_fetch)
            last_run = {
                'alert': last_run,
                'application': last_run,
                'audit': last_run,
                'network': last_run
            }

        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client, api_version, last_run)
            return_results(result)

        elif demisto.command() == 'netskope-get-events':
            if api_version == 'v1':
                return_results(v1_get_events_command(client, demisto.args(), last_run))
            else:
                return_results(v2_get_events_command(client, demisto.args(), last_run))
        elif demisto.command() == 'fetch-events':
            if api_version == 'v1':
                events = client.get_events_request_v1(last_run, max_fetch)
                alerts = client.v1_get_alerts_request(last_run, max_fetch)
                if alerts:
                    events.extend(alerts)
                demisto.setLastRun(get_last_run(events, last_run))
                demisto.debug(f'Setting the last_run to: {last_run}')
                send_events_to_xsiam(events=events, vendor=vendor, product=product)
            else:
                events = client.get_events_request_v2(last_run, max_fetch)
                demisto.setLastRun(get_last_run(events, last_run))
                demisto.debug(f'Setting the last_run to: {last_run}')
                send_events_to_xsiam(events=events, vendor=vendor, product=product)

    # Log exceptions and return errors
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')

","def main() -> None:  # pragma: no cover
    params = demisto.params()

    url = params.get('url')
    api_version = params.get('api_version')
    token = params.get('credentials', {}).get('password')
    base_url = urljoin(url, f'/api/{api_version}/')
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    first_fetch = params.get('first_fetch')
    max_fetch = params.get('max_fetch')
    vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')

    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        client = Client(base_url, token, api_version, verify_certificate, proxy)

        last_run = demisto.getLastRun()
        if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
                and 'network' not in last_run:
            last_run = arg_to_seconds_timestamp(first_fetch)
            last_run = {
                'alert': last_run,
                'application': last_run,
                'audit': last_run,
                'network': last_run
            }

        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client, api_version, last_run)
            return_results(result)

        elif demisto.command() == 'netskope-get-events':
            if api_version == 'v1':
                return_results(v1_get_events_command(client, demisto.args(), last_run))
            else:
                return_results(v2_get_events_command(client, demisto.args(), last_run))
        elif demisto.command() == 'fetch-events':
            if api_version == 'v1':
                events = client.get_events_request_v1(last_run, max_fetch)
                alerts = client.v1_get_alerts_request(last_run, max_fetch)
                if alerts:
                    events.extend(alerts)
                demisto.setLastRun(get_last_run(events, last_run))
                demisto.debug(f'Setting the last_run to: {last_run}')
                send_events_to_xsiam(events=events, vendor=vendor, product=product)
            else:
                events = client.get_events_request_v2(last_run, max_fetch)
                demisto.setLastRun(get_last_run(events, last_run))
                demisto.debug(f'Setting the last_run to: {last_run}')
                send_events_to_xsiam(events=events, vendor=vendor, product=product)

    # Log exceptions and return errors
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')

"
54877,"def fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
    # args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)
    args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedRMSNormAffineFunction.apply(*args)

","def fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
    args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedRMSNormAffineFunction.apply(*args)

"
54887,"def run_transformer_tests():
    python_executable_path = sys.executable
    # repository_root = os.path.join(os.path.dirname(__file__), ""../../../"")
    # directory = os.path.abspath(os.path.join(repository_root, ""tests/mpu""))
    directory = os.path.dirname(__file__)
    files = [
        os.path.join(directory, f)
        for f in os.listdir(directory)
        if f.startswith(""run_"") and os.path.isfile(os.path.join(directory, f))
    ]
    print(""#######################################################"")
    print(f""# Python executable path: {python_executable_path}"")
    print(f""# {len(files)} tests: {files}"")
    print(""#######################################################"")
    errors = []
    for i, test_file in enumerate(files, 1):
        is_denied = False
        should_skip, launch_option = get_launch_option(test_file)
        if should_skip:
            print(
                f""### {i} / {len(files)}: {test_file} skipped. Requires multiple GPUs.""
            )
            continue
        test_run_cmd = (
            f""{python_executable_path} {launch_option} {test_file} ""
            ""--micro-batch-size 2 --num-layers 16 --hidden-size 256 --num-attention-heads 8 --max-position-embeddings ""
            ""512 --seq-length 512 --global-batch-size 128""
        )
        if ""bert"" in test_file or ""gpt"" in test_file:
            import torch

            num_devices = torch.cuda.device_count()
            tensor_model_parallel_size = 1 + (1 - (num_devices % 2 and num_devices > 4))
            pipeline_model_parallel_size = num_devices // tensor_model_parallel_size
            test_run_cmd += f"" --pipeline-model-parallel-size {pipeline_model_parallel_size} --tensor-model-parallel-size {tensor_model_parallel_size}""
            # TODO(mkozuki): Update apex.transformer.testing
            continue
        else:
            test_run_cmd += f"" --use-cpu-initialization""
        print(f""### {i} / {len(files)}: cmd: {test_run_cmd}"")
        try:
            output = (
                subprocess.check_output(test_run_cmd, shell=True)
                .decode(sys.stdout.encoding)
                .strip()
            )
        except Exception as e:
            errors.append((test_file, str(e)))
        else:
            if "">> passed the test :-)"" not in output:
                errors.append((test_file, output))
    else:
        if not errors:
            print(""### PASSED"")
        else:
            print(""### FAILED"")
            short_msg = f""{len(errors)} out of {len(files)} tests failed""
            print(short_msg)
            for (filename, log) in errors:
                print(f""File: {filename}\nLog: {log}"")
            raise RuntimeError(short_msg)

","def run_transformer_tests():
    python_executable_path = sys.executable
    # repository_root = os.path.join(os.path.dirname(__file__), ""../../../"")
    # directory = os.path.abspath(os.path.join(repository_root, ""tests/mpu""))
    directory = os.path.dirname(__file__)
    files = [
        os.path.join(directory, f)
        for f in os.listdir(directory)
        if f.startswith(""run_"") and os.path.isfile(os.path.join(directory, f))
    ]
    print(""#######################################################"")
    print(f""# Python executable path: {python_executable_path}"")
    print(f""# {len(files)} tests: {files}"")
    print(""#######################################################"")
    errors = []
    for i, test_file in enumerate(files, 1):
        is_denied = False
        should_skip, launch_option = get_launch_option(test_file)
        if should_skip:
            print(
                f""### {i} / {len(files)}: {test_file} skipped. Requires multiple GPUs.""
            )
            continue
        test_run_cmd = (
            f""{python_executable_path} {launch_option} {test_file} ""
            ""--micro-batch-size 2 --num-layers 16 --hidden-size 256 --num-attention-heads 8 --max-position-embeddings ""
            ""512 --seq-length 512 --global-batch-size 128""
        )
        if ""bert"" in test_file or ""gpt"" in test_file:
            import torch

            num_devices = torch.cuda.device_count()
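            # Use a tensor-parallel degree of 2 when the device count is even (or odd but small),
            # otherwise 1; the remaining devices are assigned to pipeline parallelism.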
            tensor_model_parallel_size = 1 + (1 - (num_devices % 2 and num_devices > 4))
            pipeline_model_parallel_size = num_devices // tensor_model_parallel_size
            test_run_cmd += f"" --pipeline-model-parallel-size {pipeline_model_parallel_size} --tensor-model-parallel-size {tensor_model_parallel_size}""
            # TODO(mkozuki): Update apex.transformer.testing. The model definitions and global vars
            #     are outdated, so the gpt/bert scripts fail functionally (e.g. the order of tensor dims).
            continue
        else:
            test_run_cmd += f"" --use-cpu-initialization""
        print(f""### {i} / {len(files)}: cmd: {test_run_cmd}"")
        try:
            output = (
                subprocess.check_output(test_run_cmd, shell=True)
                .decode(sys.stdout.encoding)
                .strip()
            )
        except Exception as e:
            errors.append((test_file, str(e)))
        else:
            if "">> passed the test :-)"" not in output:
                errors.append((test_file, output))
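    # no break occurs in the loop above, so this for/else branch always runs as the summary step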
    else:
        if not errors:
            print(""### PASSED"")
        else:
            print(""### FAILED"")
            short_msg = f""{len(errors)} out of {len(files)} tests failed""
            print(short_msg)
            for (filename, log) in errors:
                print(f""File: {filename}\nLog: {log}"")
            raise RuntimeError(short_msg)

"
31768,"def main():
    params = demisto.params()
    args = demisto.args()
    url = params.get('url')
    verify_certificate = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    headers = {}
    headers['PRIVATE-TOKEN'] = f'{params[""api_key""]}'

    command = demisto.command()
    LOG(f'Command being called is {command}')

    try:
        urllib3.disable_warnings()
        client = Client(urljoin(url, """"), verify_certificate, proxy, headers=headers)
        commands = {
            'gitlab-get-projects': get_projects_command,
            'gitlab-projects-get-access-requests': projects_get_access_requests_command,
            'gitlab-projects-request-access': projects_request_access_command,
            'gitlab-projects-approve-access': projects_approve_access_command,
            'gitlab-projects-deny-access': projects_deny_access_command,
            'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
            'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
            'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
            'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
            'gitlab-get-version': get_version_command,
            'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
            'gitlab-pipelines-list': gitlab_pipelines_list_command,
            'gitlab-jobs-list': gitlab_jobs_list_command,
            'gitlab-artifact-get': gitlab_artifact_get_command,
            'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
            'gitlab-get-merge-request': gitlab_get_merge_request_command,
            'gitlab-issues-list': gitlab_issues_list_command,
            'gitlab-create-issue': gitlab_create_issue_command,
            'gitlab-edit-issue': gitlab_edit_issue_command,
            'gitlab-group-projects-list': gitlab_group_projects_list_command,
            'gitlab-get-raw-file': gitlab_get_raw_file_command
        }

        if command == 'test-module':
            test_module(client)
        else:
            return_results(commands[command](client, args))

    except Exception as e:
        return_error(str(e))

","def main():
    params = demisto.params()
    args = demisto.args()
    url = params.get('url')
    verify_certificate = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    headers = {}
    headers['PRIVATE-TOKEN'] = f'{params[""api_key""]}'

    command = demisto.command()
    LOG(f'Command being called is {command}')

    try:
        urllib3.disable_warnings()
        client = Client(urljoin(url, """"), verify_certificate, proxy, headers=headers)
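        # Map each XSOAR command name to its handler; every handler shares the (client, args) signature.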
        commands = {
            'gitlab-get-projects': get_projects_command,
            'gitlab-projects-get-access-requests': projects_get_access_requests_command,
            'gitlab-projects-request-access': projects_request_access_command,
            'gitlab-projects-approve-access': projects_approve_access_command,
            'gitlab-projects-deny-access': projects_deny_access_command,
            'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
            'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
            'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
            'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
            'gitlab-get-version': get_version_command,
            'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
            'gitlab-pipelines-list': gitlab_pipelines_list_command,
            'gitlab-jobs-list': gitlab_jobs_list_command,
            'gitlab-artifact-get': gitlab_artifact_get_command,
            'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
            'gitlab-get-merge-request': gitlab_get_merge_request_command,
            'gitlab-issues-list': gitlab_issues_list_command,
            'gitlab-create-issue': gitlab_create_issue_command,
            'gitlab-edit-issue': gitlab_edit_issue_command,
            'gitlab-group-projects-list': gitlab_group_projects_list_command,
            'gitlab-raw-file-get': gitlab_get_raw_file_command
        }

        if command == 'test-module':
            test_module(client)
        else:
            return_results(commands[command](client, args))

    except Exception as e:
        return_error(str(e))

"
31022,"def panorama_route_lookup(dest_ip: str, virtual_router=None):
    """"""
    Given the provided ip address, looks up the outgoing interface and zone on the firewall.
    """"""
    if not VSYS:
        raise Exception(""The 'panorama-route-lookup' command is only relevant for a Firewall instance."")

    response = panorama_get_routes(virtual_router)
    if 'entry' not in response['response']['result']:
        raise Exception(""No routes returned from the Firewall."")
    else:
        routes = response['response']['result']['entry']

    ip_addr = ipaddress.ip_address(dest_ip)
    current_match = None
    matched_route = None
    for route in routes:
        subnet_raw = route['destination']

        subnet = ipaddress.ip_network(subnet_raw)
        # If the given IP address is in the subnet
        if ip_addr in subnet:
            # IF we haven't matched yet
            if not current_match:
                current_match = subnet
                matched_route = route
            # If this is a greater subnet
            elif subnet.prefixlen > current_match.prefixlen:
                current_match = subnet
                matched_route = route

    if matched_route:
        return matched_route
    else:
        raise Exception(""Route not found."")

","def panorama_route_lookup(dest_ip: str, virtual_router=None):
    """"""
    Given the provided ip address, looks up the outgoing interface and zone on the firewall.
    """"""
    if not VSYS:
        raise Exception(""The 'panorama-route-lookup' command is only relevant for a Firewall instance."")

    response = panorama_get_routes(virtual_router)
    if 'entry' not in response['response'].get('result', {}):
        raise Exception(""No routes returned from the Firewall."")
    else:
        routes = response['response']['result']['entry']

    ip_addr = ipaddress.ip_address(dest_ip)
    current_match = None
    matched_route = None
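    # Longest-prefix match: keep the most specific subnet that contains the destination address.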
    for route in routes:
        subnet_raw = route['destination']

        subnet = ipaddress.ip_network(subnet_raw)
        # If the given IP address is in the subnet
        if ip_addr in subnet:
            # IF we haven't matched yet
            if not current_match:
                current_match = subnet
                matched_route = route
            # If this is a greater subnet
            elif subnet.prefixlen > current_match.prefixlen:
                current_match = subnet
                matched_route = route

    if matched_route:
        return matched_route
    else:
        raise Exception(""Route not found."")

"
9366,"def test_wrap_var_tuple():
    assert not isinstance(wrap_var(('foo',)), AnsibleUnsafe)
    assert not isinstance(wrap_var(('foo',))[0], AnsibleUnsafe)

","def test_wrap_var_tuple():
    assert not isinstance(wrap_var(('foo',)), AnsibleUnsafe)
    assert isinstance(wrap_var(('foo',))[0], type(''))

"
27762,"def test_show_multi_test_fixture_setup_and_teardown_correctly_simple(pytester: Pytester):
    """"""Verify that when a fixture lives for longer than a single test, --setup-plan
    correctly displays the SETUP/TEARDOWN indicators the right number of times.

    As reported in https://github.com/pytest-dev/pytest/issues/2049
    --setup-plan was showing SETUP/TEARDOWN on every test, even when the fixture
    should persist through multiple tests.

    (Note that this bug never affected actual test execution, which used the
    correct fixture lifetimes. It was purely a display bug for --setup-plan, and
    did not affect the related --setup-show or --setup-only.)
    """"""
    pytester.makepyfile(
        """"""
        import pytest
        @pytest.fixture(scope = 'class')
        def fix():
            return object()
        class TestClass:
            def test_one(self, fix):
                assert False
            def test_two(self, fix):
                assert False
    """"""
    )

    result = pytester.runpytest(""--setup-plan"")
    assert result.ret == 0

    setup_fragment = ""SETUP    C fix""
    setup_count = 0

    teardown_fragment = ""TEARDOWN C fix""
    teardown_count = 0

    for line in result.stdout.lines:
        if setup_fragment in line:
            setup_count += 1
        if teardown_fragment in line:
            teardown_count += 1

    # before the fix this tests, there would have been a setup/teardown
    # message for each test, so the counts would each have been 2
    assert setup_count == 1
    assert teardown_count == 1

","def test_show_multi_test_fixture_setup_and_teardown_correctly_simple(pytester: Pytester) -> None:
    """"""Verify that when a fixture lives for longer than a single test, --setup-plan
    correctly displays the SETUP/TEARDOWN indicators the right number of times.

    As reported in https://github.com/pytest-dev/pytest/issues/2049
    --setup-plan was showing SETUP/TEARDOWN on every test, even when the fixture
    should persist through multiple tests.

    (Note that this bug never affected actual test execution, which used the
    correct fixture lifetimes. It was purely a display bug for --setup-plan, and
    did not affect the related --setup-show or --setup-only.)
    """"""
    pytester.makepyfile(
        """"""
        import pytest
        @pytest.fixture(scope = 'class')
        def fix():
            return object()
        class TestClass:
            def test_one(self, fix):
                assert False
            def test_two(self, fix):
                assert False
    """"""
    )

    result = pytester.runpytest(""--setup-plan"")
    assert result.ret == 0

    setup_fragment = ""SETUP    C fix""
    setup_count = 0

    teardown_fragment = ""TEARDOWN C fix""
    teardown_count = 0

    for line in result.stdout.lines:
        if setup_fragment in line:
            setup_count += 1
        if teardown_fragment in line:
            teardown_count += 1

    # before the fix this test verifies, there would have been a setup/teardown
    # message for each test, so the counts would each have been 2
    assert setup_count == 1
    assert teardown_count == 1

"
12316,"def brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[],
              args={}, sec_cutoff=0.1, options=None):
    """"""
    Solves for the dynamics of a system using the Bloch-Redfield master
    equation, given an input Hamiltonian, Hermitian bath-coupling terms and
    their associated spectral functions, as well as possible Lindblad collapse
    operators.

    Parameters
    ----------
    H : :class:`Qobj`, :class:`QobjEvo`
        Possibly time-dependent system Liouvillian or Hamiltonian as a Qobj or
        QobjEvo. list of [:class:`Qobj`, :class:`Coefficient`] or callable that
        can be made into :class:`QobjEvo` are also accepted.

    psi0: Qobj
        Initial density matrix or state vector (ket).

    tlist : array_like
        List of times for evaluating evolution

    a_ops : list of (a_op, spectra)
        Nested list of system operators that couple to the environment,
        and the corresponding bath spectra.

        a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
            The operator coupling to the environment. Must be hermitian.

        spectra : :class:`Coefficient`, str, func
            The corresponding bath spectral responce.
            Can be a `Coefficient` using an 'w' args, a function of the
            frequence or a string. Coefficient build from a numpy array are
            understood as a function of ``w`` instead of ``t``. Function are
            expected to be of the signature ``f(w)`` or ``f(t, w, **args)``.

            The spectra function can depend on ``t`` if the corresponding
            ``a_op`` is a :class:`QobjEvo`.

        Example:

        .. code-block::

            a_ops = [
                (a+a.dag(), ('w>0', args={""w"": 0})),
                (QobjEvo(a+a.dag()), 'w > exp(-t)'),
                (QobjEvo([b+b.dag(), lambda t: ...]), lambda w: ...)),
                (c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))),
            ]

        .. note:
            ``Cubic_Spline`` have been replaced by :class:`Coefficient`\:
                ``spline = qutip.coefficient(array, tlist=times)``

            Whether the ``a_ops`` is time dependent is deceided by the type of
            the operator: :class:`Qobj` vs :class:`QobjEvo` instead of the type
            of the spectra.

    e_ops : list of :class:`Qobj` / callback function
        Single operator or list of operators for which to evaluate
        expectation values or callable or list of callable.
        Callable signature must be, `f(t: float, state: Qobj)`.
        See :func:`expect` for more detail of operator expectation

    c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format)
        List of collapse operators.

    args : dict
        Dictionary of parameters for time-dependent Hamiltonians and
        collapse operators. The key ``w`` is reserved for the spectra function.

    sec_cutoff : float {0.1}
        Cutoff for secular approximation. Use ``-1`` if secular approximation
        is not used when evaluating bath-coupling terms.

    options : :class:`qutip.solver.SolverOptions`
        Options for the solver.

    Returns
    -------
    result: :class:`qutip.solver.Result`

        An instance of the class :class:`qutip.solver.Result`, which contains
        either an array of expectation values, for operators given in e_ops,
        or a list of states for the times specified by `tlist`.

    .. note:
        The option ``operator_data_type`` is used to determine in which format
        the bloch redfield tensor is computed. Use 'csr' for sparse and 'dense'
        for dense array. With 'data', it will try to use the same data type as
        the ``a_ops``, but it is usually less efficient than manually choosing
        it.
    """"""
    H = QobjEvo(H, args=args, tlist=tlist)

    c_ops = c_ops if c_ops is not None else []
    if not isinstance(c_ops, (list, tuple)):
        c_ops = [c_ops]
    c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]

    new_a_ops = []
    for (a_op, spectra) in a_ops:
        aop = QobjEvo(a_op, args=args, tlist=tlist)
        if isinstance(spectra, str):
            new_a_ops.append(
                (aop, coefficient(spectra, args={**args, 'w':0})))
        elif isinstance(spectra, InterCoefficient):
            new_a_ops.append((aop, SpectraCoefficient(spectra)))
        elif isinstance(spectra, Coefficient):
            new_a_ops.append((aop, spectra))
        elif callable(spectra):
            sig = inspect.signature(spectra)
            if tuple(sig.parameters.keys()) == (""w"",):
                spec = SpectraCoefficient(coefficient(spectra))
            else:
                spec = coefficient(spectra, args={**args, 'w':0})
            new_a_ops.append((aop, spec))
        else:
            raise TypeError(""a_ops's spectra not known"")

    solver = BRSolver(
        H, new_a_ops, c_ops, options=options, sec_cutoff=sec_cutoff,
    )

    return solver.run(psi0, tlist, e_ops=e_ops)

","def brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[],
              args={}, sec_cutoff=0.1, options=None):
    """"""
    Solves for the dynamics of a system using the Bloch-Redfield master
    equation, given an input Hamiltonian, Hermitian bath-coupling terms and
    their associated spectral functions, as well as possible Lindblad collapse
    operators.

    Parameters
    ----------
    H : :class:`Qobj`, :class:`QobjEvo`
        Possibly time-dependent system Liouvillian or Hamiltonian as a Qobj or
        QobjEvo. list of [:class:`Qobj`, :class:`Coefficient`] or callable that
        can be made into :class:`QobjEvo` are also accepted.

    psi0: Qobj
        Initial density matrix or state vector (ket).

    tlist : array_like
        List of times for evaluating evolution

    a_ops : list of (a_op, spectra)
        Nested list of system operators that couple to the environment,
        and the corresponding bath spectra.

        a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
            The operator coupling to the environment. Must be hermitian.

        spectra : :class:`Coefficient`, str, func
            The corresponding bath spectral response.
            Can be a `Coefficient` using a ``w`` argument, a function of the
            frequency, or a string. Coefficients built from a numpy array are
            understood as functions of ``w`` instead of ``t``. Functions are
            expected to have the signature ``f(w)`` or ``f(t, w, **args)``.

            The spectra function can depend on ``t`` if the corresponding
            ``a_op`` is a :class:`QobjEvo`.

        Example:

        .. code-block::

            a_ops = [
                (a+a.dag(), ('w>0', args={""w"": 0})),
                (QobjEvo(a+a.dag()), 'w > exp(-t)'),
                (QobjEvo([b+b.dag(), lambda t: ...]), lambda w: ...)),
                (c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))),
            ]

        .. note::
            ``Cubic_Spline`` has been replaced by :class:`Coefficient`\:
                ``spline = qutip.coefficient(array, tlist=times)``

            Whether the ``a_ops`` is time dependent is decided by the type of
            the operator: :class:`Qobj` vs :class:`QobjEvo` instead of the type
            of the spectra.

    e_ops : list of :class:`Qobj` / callback function
        Single operator or list of operators for which to evaluate
        expectation values or callable or list of callable.
        Callable signature must be, `f(t: float, state: Qobj)`.
        See :func:`expect` for more detail of operator expectation

    c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format)
        List of collapse operators.

    args : dict
        Dictionary of parameters for time-dependent Hamiltonians and
        collapse operators. The key ``w`` is reserved for the spectra function.

    sec_cutoff : float {0.1}
        Cutoff for secular approximation. Use ``-1`` if secular approximation
        is not used when evaluating bath-coupling terms.

    options : :class:`qutip.solver.SolverOptions`
        Options for the solver.

    Returns
    -------
    result: :class:`qutip.solver.Result`

        An instance of the class :class:`qutip.solver.Result`, which contains
        either an array of expectation values, for operators given in e_ops,
        or a list of states for the times specified by `tlist`.

    .. note::
        The option ``operator_data_type`` is used to determine in which format
        the Bloch-Redfield tensor is computed. Use 'csr' for sparse and 'dense'
        for dense array. With 'data', it will try to use the same data type as
        the ``a_ops``, but it is usually less efficient than manually choosing
        it.
    """"""
    H = QobjEvo(H, args=args, tlist=tlist)

    c_ops = c_ops if c_ops is not None else []
    if not isinstance(c_ops, (list, tuple)):
        c_ops = [c_ops]
    c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]

    new_a_ops = []
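    # Normalize each (a_op, spectra) pair: operators become QobjEvo and spectra become Coefficient objects.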
    for (a_op, spectra) in a_ops:
        aop = QobjEvo(a_op, args=args, tlist=tlist)
        if isinstance(spectra, str):
            new_a_ops.append(
                (aop, coefficient(spectra, args={**args, 'w':0})))
        elif isinstance(spectra, InterCoefficient):
            new_a_ops.append((aop, SpectraCoefficient(spectra)))
        elif isinstance(spectra, Coefficient):
            new_a_ops.append((aop, spectra))
        elif callable(spectra):
            sig = inspect.signature(spectra)
            if tuple(sig.parameters.keys()) == (""w"",):
                spec = SpectraCoefficient(coefficient(spectra))
            else:
                spec = coefficient(spectra, args={**args, 'w':0})
            new_a_ops.append((aop, spec))
        else:
            raise TypeError(""a_ops's spectra not known"")

    solver = BRSolver(
        H, new_a_ops, c_ops, options=options, sec_cutoff=sec_cutoff,
    )

    return solver.run(psi0, tlist, e_ops=e_ops)

"
6527,"def get_columns():
	columns = [
		{
			""fieldname"": ""name"",
			""fieldtype"": ""Link"",
			""label"": ""Task"",
			""options"": ""Task"",
			""width"": 150
		},
		{
			""fieldname"": ""subject"",
			""fieldtype"": ""Data"",
			""label"": ""Subject"",
			""width"": 200
		},
		{
			""fieldname"": ""status"",
			""fieldtype"": ""Data"",
			""label"": ""Status"",
			""width"": 100
		},
		{
			""fieldname"": ""priority"",
			""fieldtype"": ""Data"",
			""label"": ""Priority"",
			""width"": 100
		},
		{
			""fieldname"": ""progress"",
			""fieldtype"": ""Data"",
			""label"": ""Progress (%)"",
			""width"": 100
		},
		{
			""fieldname"": ""exp_start_date"",
			""fieldtype"": ""Date"",
			""label"": ""Expected Start Date"",
			""width"": 150
		},
		{
			""fieldname"": ""exp_end_date"",
			""fieldtype"": ""Date"",
			""label"": ""Expected End Date"",
			""width"": 150
		},
		{
			""fieldname"": ""completed_on"",
			""fieldtype"": ""Date"",
			""label"": ""Actual End Date"",
			""width"": 130
		},
		{
			""fieldname"": ""delay"",
			""fieldtype"": ""Data"",
			""label"": ""Delay"",
			""width"": 70
		}
	]
	return columns
","def get_columns():
	columns = [
		{
			""fieldname"": ""name"",
			""fieldtype"": ""Link"",
			""label"": ""Task"",
			""options"": ""Task"",
			""width"": 150
		},
		{
			""fieldname"": ""subject"",
			""fieldtype"": ""Data"",
			""label"": ""Subject"",
			""width"": 200
		},
		{
			""fieldname"": ""status"",
			""fieldtype"": ""Data"",
			""label"": ""Status"",
			""width"": 100
		},
		{
			""fieldname"": ""priority"",
			""fieldtype"": ""Data"",
			""label"": ""Priority"",
			""width"": 100
		},
		{
			""fieldname"": ""progress"",
			""fieldtype"": ""Data"",
			""label"": ""Progress (%)"",
			""width"": 100
		},
		{
			""fieldname"": ""exp_start_date"",
			""fieldtype"": ""Date"",
			""label"": ""Expected Start Date"",
			""width"": 150
		},
		{
			""fieldname"": ""exp_end_date"",
			""fieldtype"": ""Date"",
			""label"": ""Expected End Date"",
			""width"": 150
		},
		{
			""fieldname"": ""completed_on"",
			""fieldtype"": ""Date"",
			""label"": ""Actual End Date"",
			""width"": 130
		},
		{
			""fieldname"": ""delay"",
			""fieldtype"": ""Data"",
			""label"": ""Delay (In Days)"",
			""width"": 70
		}
	]
	return columns
"
3903,"def extrema_bounding(G, compute=""diameter""):
    """"""Compute requested extreme distance metric of undirected graph G

    Computation is based on smart lower and upper bounds, and in practice
    linear in the number of nodes, rather than quadratic (except for some
    border cases such as complete graphs or circle shaped graphs).

    Parameters
    ----------
    G : NetworkX graph
       An undirected graph

    compute : string denoting the requesting metric
       ""diameter"" for the maximal eccentricity value,
       ""radius"" for the minimal eccentricity value,
       ""periphery"" for the set of nodes with eccentricity equal to the diameter,
       ""center"" for the set of nodes with eccentricity equal to the radius,
       ""eccentricities"" for the maximum distance from each node to all other nodes in G

    Returns
    -------
    value : value of the requested metric
       int for ""diameter"" and ""radius"" or
       list of nodes for ""center"" and ""periphery"" or
       dictionary of eccentricity values keyed by node for ""eccentricities""

    Raises
    ------
    NetworkXError
        If the graph consists of multiple components or
        If the compute parameter is passed an invalid argument

    Notes
    -----
    This algorithm was proposed in the following papers:

    F.W. Takes and W.A. Kosters, Determining the Diameter of Small World
    Networks, in Proceedings of the 20th ACM International Conference on
    Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011.
    doi: https://doi.org/10.1145/2063576.2063748

    F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of
    Large Graphs, Algorithms 6(1): 100-118, 2013.
    doi: https://doi.org/10.3390/a6010100

    M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes,
    Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected)
    Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015.
    doi: https://doi.org/10.1016/j.tcs.2015.02.033
    """"""

    # init variables
    degrees = dict(G.degree())  # start with the highest degree node
    minlowernode = max(degrees, key=degrees.get)
    N = len(degrees)  # number of nodes
    # alternate between smallest lower and largest upper bound
    high = False
    # status variables
    ecc_lower = dict.fromkeys(G, 0)
    ecc_upper = dict.fromkeys(G, N)
    candidates = set(G)

    # (re)set bound extremes
    minlower = N
    maxlower = 0
    minupper = N
    maxupper = 0

    # repeat the following until there are no more candidates
    while candidates:
        if high:
            current = maxuppernode  # select node with largest upper bound
        else:
            current = minlowernode  # select node with smallest lower bound
        high = not high

        # get distances from/to current node and derive eccentricity
        dist = dict(nx.single_source_shortest_path_length(G, current))
        if len(dist) != N:
            msg = ""Cannot compute metric because graph is not connected.""
            raise nx.NetworkXError(msg)
        current_ecc = max(dist.values())

        # print status update
        #        print (""ecc of "" + str(current) + "" ("" + str(ecc_lower[current]) + ""/""
        #        + str(ecc_upper[current]) + "", deg: "" + str(dist[current]) + "") is ""
        #        + str(current_ecc))
        #        print(ecc_upper)

        # (re)set bound extremes
        maxuppernode = None
        minlowernode = None

        # update node bounds
        for i in candidates:
            # update eccentricity bounds
            d = dist[i]
            ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d)))
            ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d)

            # update min/max values of lower and upper bounds
            minlower = min(ecc_lower[i], minlower)
            maxlower = max(ecc_lower[i], maxlower)
            minupper = min(ecc_upper[i], minupper)
            maxupper = max(ecc_upper[i], maxupper)

        # update candidate set
        if compute == ""diameter"":
            ruled_out = {
                i
                for i in candidates
                if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper
            }
        elif compute == ""radius"":
            ruled_out = {
                i
                for i in candidates
                if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower
            }
        elif compute == ""periphery"":
            ruled_out = {
                i
                for i in candidates
                if ecc_upper[i] < maxlower
                and (maxlower == maxupper or ecc_lower[i] > maxupper)
            }
        elif compute == ""center"":
            ruled_out = {
                i
                for i in candidates
                if ecc_lower[i] > minupper
                and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower)
            }
        elif compute == ""eccentricities"":
            ruled_out = set()
        else:
            msg = ""The argument passed to compute parameter is invalid. Please enter one of the following extreme distance metrics: 'diameter', 'radius', 'periphery', 'center', 'eccentricities'""
            raise nx.NetworkXError(msg)

        ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i])
        candidates -= ruled_out

        #        for i in ruled_out:
        #            print(""removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g""%
        #                    (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper))
        #        print(""node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g""%
        #                    (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper))
        #        print(""NODE 4: %g""%(ecc_upper[4] <= maxlower))
        #        print(""NODE 4: %g""%(2 * ecc_lower[4] >= maxupper))
        #        print(""NODE 4: %g""%(ecc_upper[4] <= maxlower
        #                            and 2 * ecc_lower[4] >= maxupper))

        # updating maxuppernode and minlowernode for selection in next round
        for i in candidates:
            if (
                minlowernode is None
                or (
                    ecc_lower[i] == ecc_lower[minlowernode]
                    and degrees[i] > degrees[minlowernode]
                )
                or (ecc_lower[i] < ecc_lower[minlowernode])
            ):
                minlowernode = i

            if (
                maxuppernode is None
                or (
                    ecc_upper[i] == ecc_upper[maxuppernode]
                    and degrees[i] > degrees[maxuppernode]
                )
                or (ecc_upper[i] > ecc_upper[maxuppernode])
            ):
                maxuppernode = i

        # print status update
    #        print ("" min="" + str(minlower) + ""/"" + str(minupper) +
    #        "" max="" + str(maxlower) + ""/"" + str(maxupper) +
    #        "" candidates: "" + str(len(candidates)))
    #        print(""cand:"",candidates)
    #        print(""ecc_l"",ecc_lower)
    #        print(""ecc_u"",ecc_upper)
    #        wait = input(""press Enter to continue"")

    # return the correct value of the requested metric
    if compute == ""diameter"":
        return maxlower
    elif compute == ""radius"":
        return minupper
    elif compute == ""periphery"":
        p = [v for v in G if ecc_lower[v] == maxlower]
        return p
    elif compute == ""center"":
        c = [v for v in G if ecc_upper[v] == minupper]
        return c
    elif compute == ""eccentricities"":
        return ecc_lower
    return None

","def extrema_bounding(G, compute=""diameter""):
    """"""Compute requested extreme distance metric of undirected graph G

    Computation is based on smart lower and upper bounds, and in practice
    linear in the number of nodes, rather than quadratic (except for some
    border cases such as complete graphs or circle shaped graphs).

    Parameters
    ----------
    G : NetworkX graph
       An undirected graph

    compute : string denoting the requested metric
       ""diameter"" for the maximal eccentricity value,
       ""radius"" for the minimal eccentricity value,
       ""periphery"" for the set of nodes with eccentricity equal to the diameter,
       ""center"" for the set of nodes with eccentricity equal to the radius,
       ""eccentricities"" for the maximum distance from each node to all other nodes in G

    Returns
    -------
    value : value of the requested metric
       int for ""diameter"" and ""radius"" or
       list of nodes for ""center"" and ""periphery"" or
       dictionary of eccentricity values keyed by node for ""eccentricities""

    Raises
    ------
    NetworkXError
        If the graph consists of multiple components.
    ValueError
        If the compute parameter is passed an invalid argument.

    Notes
    -----
    This algorithm was proposed in the following papers:

    F.W. Takes and W.A. Kosters, Determining the Diameter of Small World
    Networks, in Proceedings of the 20th ACM International Conference on
    Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011.
    doi: https://doi.org/10.1145/2063576.2063748

    F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of
    Large Graphs, Algorithms 6(1): 100-118, 2013.
    doi: https://doi.org/10.3390/a6010100

    M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes,
    Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected)
    Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015.
    doi: https://doi.org/10.1016/j.tcs.2015.02.033
    """"""

    # init variables
    degrees = dict(G.degree())  # start with the highest degree node
    minlowernode = max(degrees, key=degrees.get)
    N = len(degrees)  # number of nodes
    # alternate between smallest lower and largest upper bound
    high = False
    # status variables
    ecc_lower = dict.fromkeys(G, 0)
    ecc_upper = dict.fromkeys(G, N)
    candidates = set(G)

    # (re)set bound extremes
    minlower = N
    maxlower = 0
    minupper = N
    maxupper = 0

    # repeat the following until there are no more candidates
    while candidates:
        if high:
            current = maxuppernode  # select node with largest upper bound
        else:
            current = minlowernode  # select node with smallest lower bound
        high = not high

        # get distances from/to current node and derive eccentricity
        dist = dict(nx.single_source_shortest_path_length(G, current))
        if len(dist) != N:
            msg = ""Cannot compute metric because graph is not connected.""
            raise nx.NetworkXError(msg)
        current_ecc = max(dist.values())

        # print status update
        #        print (""ecc of "" + str(current) + "" ("" + str(ecc_lower[current]) + ""/""
        #        + str(ecc_upper[current]) + "", deg: "" + str(dist[current]) + "") is ""
        #        + str(current_ecc))
        #        print(ecc_upper)

        # (re)set bound extremes
        maxuppernode = None
        minlowernode = None

        # update node bounds
        for i in candidates:
            # update eccentricity bounds
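            # triangle inequality: max(d, current_ecc - d) <= ecc(i) <= current_ecc + d, with d = dist[i]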
            d = dist[i]
            ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d)))
            ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d)

            # update min/max values of lower and upper bounds
            minlower = min(ecc_lower[i], minlower)
            maxlower = max(ecc_lower[i], maxlower)
            minupper = min(ecc_upper[i], minupper)
            maxupper = max(ecc_upper[i], maxupper)

        # update candidate set
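        # drop nodes whose bounds already certify they cannot change the requested metric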
        if compute == ""diameter"":
            ruled_out = {
                i
                for i in candidates
                if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper
            }
        elif compute == ""radius"":
            ruled_out = {
                i
                for i in candidates
                if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower
            }
        elif compute == ""periphery"":
            ruled_out = {
                i
                for i in candidates
                if ecc_upper[i] < maxlower
                and (maxlower == maxupper or ecc_lower[i] > maxupper)
            }
        elif compute == ""center"":
            ruled_out = {
                i
                for i in candidates
                if ecc_lower[i] > minupper
                and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower)
            }
        elif compute == ""eccentricities"":
            ruled_out = set()
        else:
            msg = ""The argument passed to compute parameter is invalid. Please enter one of the following extreme distance metrics: 'diameter', 'radius', 'periphery', 'center', 'eccentricities'""
            raise ValueError(msg)

        ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i])
        candidates -= ruled_out

        #        for i in ruled_out:
        #            print(""removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g""%
        #                    (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper))
        #        print(""node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g""%
        #                    (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper))
        #        print(""NODE 4: %g""%(ecc_upper[4] <= maxlower))
        #        print(""NODE 4: %g""%(2 * ecc_lower[4] >= maxupper))
        #        print(""NODE 4: %g""%(ecc_upper[4] <= maxlower
        #                            and 2 * ecc_lower[4] >= maxupper))

        # updating maxuppernode and minlowernode for selection in next round
        for i in candidates:
            if (
                minlowernode is None
                or (
                    ecc_lower[i] == ecc_lower[minlowernode]
                    and degrees[i] > degrees[minlowernode]
                )
                or (ecc_lower[i] < ecc_lower[minlowernode])
            ):
                minlowernode = i

            if (
                maxuppernode is None
                or (
                    ecc_upper[i] == ecc_upper[maxuppernode]
                    and degrees[i] > degrees[maxuppernode]
                )
                or (ecc_upper[i] > ecc_upper[maxuppernode])
            ):
                maxuppernode = i

        # print status update
    #        print ("" min="" + str(minlower) + ""/"" + str(minupper) +
    #        "" max="" + str(maxlower) + ""/"" + str(maxupper) +
    #        "" candidates: "" + str(len(candidates)))
    #        print(""cand:"",candidates)
    #        print(""ecc_l"",ecc_lower)
    #        print(""ecc_u"",ecc_upper)
    #        wait = input(""press Enter to continue"")

    # return the correct value of the requested metric
    if compute == ""diameter"":
        return maxlower
    elif compute == ""radius"":
        return minupper
    elif compute == ""periphery"":
        p = [v for v in G if ecc_lower[v] == maxlower]
        return p
    elif compute == ""center"":
        c = [v for v in G if ecc_upper[v] == minupper]
        return c
    elif compute == ""eccentricities"":
        return ecc_lower
    return None

"
42425,"def update_e3dc_battery(address: Iterable[str],external: int,pvother: int,pvwattin: int):
    soc = 0
    count = 0
    speicher = 0
    # extpv - > pv Leistung die als externe Produktion an e3dc angeschlossen ist
    # nur auslesen wenn als relevant parametrisiert  (external = 1) , sonst doppelte Auslesung
    extpv = 0
    # pv -> pv Leistung die direkt an e3dc angeschlossen ist
    pv = 0
    for addr in address:
        log.debug(""Battery Ip: %s, external %d pvother %d pvwatt (input) %d"", addr,external,pvother,pvwattin)
        if addr != ""none"":
            count=count+1
            client = ModbusClient(addr, port=502)
            #40082 soc
            soc = soc + client.read_holding_registers(40082,ModbusDataType.INT_16,unit=1)
            #40069 speicherleistung
            speicher = speicher + client.read_holding_registers(40069, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
            #40067 pv Leistung
            pv = pv + (client.read_holding_registers(40067, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1) * -1)
            if external == 1:
                #40075 externe pv Leistung
                extpv = extpv + client.read_holding_registers(40075, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
    soc = soc / count
    log.debug(""Battery soc %d speicherleistung %d pv %d extpv %d anzahl ip %d"", soc,speicher,pv,extpv,count)
    cnt= SimCountFactory().get_sim_counter()().sim_count(speicher, prefix=""speicher"")
    get_bat_value_store(1).set(BatState(power=speicher, soc=soc, imported= cnt[0], exported= cnt[1]))
    # pvother sagt aus, ob wr definiert ist, und dessen pv Leistungs auch gilt
    # wenn 0 gilt nur pv und extpv aus e3dc
    pvtotal = pv + extpv
    if (pvother == 0) or (pvtotal != 0):
        if pvother == 1:
            pvtotal = pvtotal + pvwattin
        log.debug("" wr update pvother %d pvtotal %d"", pvother,pvtotal)
        cntpv= SimCountFactory().get_sim_counter()().sim_count(pvtotal, prefix=""pv"")
        get_inverter_value_store(1).set(InverterState(counter=cntpv[1], power=pvtotal))

","def update_e3dc_battery(address: Iterable[str],external: int,pvother: int,pvwattin: int):
    soc = 0
    count = 0
    speicher = 0
    # extpv -> PV power that is connected to the e3dc as external production
    # only read it when configured as relevant (external = 1), otherwise it would be read twice
    extpv = 0
    # pv -> PV power that is connected directly to the e3dc
    pv = 0
    for addr in address:
        log.debug(""Battery Ip: %s, external %d pvother %d pvwatt (input) %d"", addr,external,pvother,pvwattin)
        if addr != ""none"":
            count=count+1
            client = ModbusClient(addr, port=502)
            #40082 soc
            soc = soc + client.read_holding_registers(40082,ModbusDataType.INT_16,unit=1)
            # 40069 storage (battery) power
            speicher = speicher + client.read_holding_registers(40069, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
            # 40067 PV power
            pv = pv + (client.read_holding_registers(40067, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1) * -1)
            if external == 1:
                # 40075 external PV power
                extpv = extpv + client.read_holding_registers(40075, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
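    # average the state of charge over all configured battery units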
    soc = soc / count
    log.debug(""Battery soc %d speicherleistung %d pv %d extpv %d anzahl ip %d"", soc,speicher,pv,extpv,count)
    cnt= SimCountFactory().get_sim_counter()().sim_count(speicher, prefix=""speicher"")
    get_bat_value_store(1).set(BatState(power=speicher, soc=soc, imported= cnt[0], exported= cnt[1]))
    # pvother indicates whether an inverter is configured whose PV power also counts
    # if 0, only pv and extpv from the e3dc are used
    pvtotal = pv + extpv
    if (pvother == 0) or (pvtotal != 0):
        if pvother == 1:
            pvtotal = pvtotal + pvwattin
        log.debug("" wr update pvother %d pvtotal %d"", pvother,pvtotal)
        _, pv_counter = SimCountFactory().get_sim_counter()().sim_count(pvtotal, prefix=""pv"")
        get_inverter_value_store(1).set(InverterState(counter=pv_counter, power=pvtotal))

"
1307,"def test_bagging_classifier_voting():
    # Test BaggingClassifier when base_estimator doesn't define predict_proba
    A = np.random.rand(10, 4)
    Y = np.random.randint(2, size=10, dtype=np.bool)
    bagging_classifier = BaggingClassifier(DummyVoteClassifier())
    bagging_classifier.fit(A, Y)
    # All ensemble members predict True; BaggingClassifier should predict True
    assert(bagging_classifier.predict(A).all())
","def test_bagging_classifier_voting():
    # Test BaggingClassifier when base_estimator doesn't define predict_proba
    X = np.random.rand(10, 4)
    y = np.random.randint(2, size=10, dtype=np.bool)
    bagging_classifier = BaggingClassifier(DummyVoteClassifier())
    bagging_classifier.fit(X, y)
    # All ensemble members predict True; BaggingClassifier should predict True
    assert bagging_classifier.predict(X).all()
"
36907,"def add_parser(subparsers, parent_parser):
    FREEZE_HELP = ""Freeze stages or .dvc files.""
    freeze_parser = subparsers.add_parser(
        ""freeze"",
        parents=[parent_parser],
        description=append_doc_link(FREEZE_HELP, ""freeze""),
        help=FREEZE_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    freeze_parser.add_argument(
        ""targets"",
        nargs=""+"",
        help=""Stages or .dvc files to freeze."",
        metavar=""targets"",
        choices=completion.Required.DVC_FILE,
    )
    freeze_parser.set_defaults(func=CmdFreeze)

    UNFREEZE_HELP = ""Unfreeze stages or .dvc files.""
    unfreeze_parser = subparsers.add_parser(
        ""unfreeze"",
        parents=[parent_parser],
        description=append_doc_link(UNFREEZE_HELP, ""unfreeze""),
        help=UNFREEZE_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    unfreeze_parser.add_argument(
        ""targets"",
        nargs=""+"",
        help=""Stages or .dvc files to unfreeze."",
        metavar=""targets"",
        choices=completion.Required.DVC_FILE,
    )
    unfreeze_parser.set_defaults(func=CmdUnfreeze)
","def add_parser(subparsers, parent_parser):
    FREEZE_HELP = ""Freeze stages or .dvc files.""
    freeze_parser = subparsers.add_parser(
        ""freeze"",
        parents=[parent_parser],
        description=append_doc_link(FREEZE_HELP, ""freeze""),
        help=FREEZE_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    freeze_parser.add_argument(
        ""targets"",
        nargs=""+"",
        help=""Stages or .dvc files to freeze."",
        metavar=""targets"",
        choices=completion.Required.DVC_FILE,
    )
    freeze_parser.set_defaults(func=CmdFreeze)

    UNFREEZE_HELP = ""Unfreeze stages or .dvc files.""
    unfreeze_parser = subparsers.add_parser(
        ""unfreeze"",
        parents=[parent_parser],
        description=append_doc_link(UNFREEZE_HELP, ""unfreeze""),
        help=UNFREEZE_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    unfreeze_parser.add_argument(
        ""targets"",
        nargs=""+"",
        help=""Stages or .dvc files to unfreeze"",
        metavar=""targets"",
        choices=completion.Required.DVC_FILE,
    )
    unfreeze_parser.set_defaults(func=CmdUnfreeze)
"
43244,"def onpolicy_trainer(
    policy: BasePolicy,
    train_collector: Collector,
    test_collector: Collector,
    max_epoch: int,
    step_per_epoch: int,
    step_per_collect: int,
    repeat_per_collect: int,
    episode_per_test: int,
    batch_size: int,
    train_fn: Optional[Callable[[int, int], None]] = None,
    test_fn: Optional[Callable[[int, Optional[int]], None]] = None,
    stop_fn: Optional[Callable[[float], bool]] = None,
    save_fn: Optional[Callable[[BasePolicy], None]] = None,
    reward_metric: Optional[Callable[[np.ndarray], np.ndarray]] = None,
    writer: Optional[SummaryWriter] = None,
    log_interval: int = 1,
    verbose: bool = True,
    test_in_train: bool = True,
    collect_method = ""episode"",
) -> Dict[str, Union[float, str]]:
    """"""A wrapper for on-policy trainer procedure.

    The ""step"" in trainer means a policy network update.

    :param policy: an instance of the :class:`~tianshou.policy.BasePolicy` class.
    :param train_collector: the collector used for training.
    :type train_collector: :class:`~tianshou.data.Collector`
    :param test_collector: the collector used for testing.
    :type test_collector: :class:`~tianshou.data.Collector`
    :param int max_epoch: the maximum number of epochs for training. The
        training process might be finished before reaching the ``max_epoch``.
    :param int step_per_epoch: the number of environment frames collected per epoch.
    :param int step_per_collect: the number of episodes the collector would
            collect before the network update in ""episode"" collect mode(defalut),
            the number of frames the collector would collect in ""step"" collect
            mode.
    :param int step_per_collect: the number of episodes the collector would
        collect before the network update. In other words, collect some
        episodes and do one policy network update.
    :param int repeat_per_collect: the number of repeat time for policy
        learning, for example, set it to 2 means the policy needs to learn each
        given batch data twice.
    :param episode_per_test: the number of episodes for one policy evaluation.
    :type episode_per_test: int or list of ints
    :param int batch_size: the batch size of sample data, which is going to
        feed in the policy network.
    :param function train_fn: a hook called at the beginning of training in
        each epoch. It can be used to perform custom additional operations,
        with the signature ``f(num_epoch: int, step_idx: int) -> None``.
    :param function test_fn: a hook called at the beginning of testing in each
        epoch. It can be used to perform custom additional operations, with the
        signature ``f(num_epoch: int, step_idx: int) -> None``.
    :param function save_fn: a hook called when the undiscounted average mean
        reward in evaluation phase gets better, with the signature ``f(policy:
        BasePolicy) -> None``.
    :param function stop_fn: a function with signature ``f(mean_rewards: float)
        -> bool``, receives the average undiscounted returns of the testing
        result, returns a boolean which indicates whether reaching the goal.
    :param function reward_metric: a function with signature ``f(rewards: np.ndarray
        with shape (num_episode, agent_num)) -> np.ndarray with shape (num_episode,)``,
        used in multi-agent RL. We need to return a single scalar for each episode's
        result to monitor training in the multi-agent RL setting. This function
        specifies what is the desired metric, e.g., the reward of agent 1 or the
        average reward over all agents.
    :param torch.utils.tensorboard.SummaryWriter writer: a TensorBoard
        SummaryWriter; if None is given, it will not write logs to TensorBoard.
    :param int log_interval: the log interval of the writer.
    :param bool verbose: whether to print the information.
    :param bool test_in_train: whether to test in the training phase.
    :param string collect_method: specifies collect mode. Can be either ""episode""
        or ""step"".

    :return: See :func:`~tianshou.trainer.gather_info`.
    """"""
    env_step, gradient_step = 0, 0
    best_epoch, best_reward, best_reward_std = -1, -1.0, 0.0
    stat: Dict[str, MovAvg] = defaultdict(MovAvg)
    start_time = time.time()
    train_collector.reset_stat()
    test_collector.reset_stat()
    test_in_train = test_in_train and train_collector.policy == policy
    test_episode(policy, test_collector, test_fn, 0, episode_per_test,
                            writer, env_step)
    for epoch in range(1, 1 + max_epoch):
        # train
        policy.train()
        with tqdm.tqdm(
            total=step_per_epoch, desc=f""Epoch #{epoch}"", **tqdm_config
        ) as t:
            while t.n < t.total:
                if train_fn:
                    train_fn(epoch, env_step)
                result = train_collector.collect(**{""n_"" + collect_method : step_per_collect})
                if reward_metric:
                    result[""rews""] = reward_metric(result[""rews""])
                env_step += int(result[""n/st""])
                t.update(result[""n/st""])
                data = {
                    ""env_step"": str(env_step),
                    ""rew"": f""{result['rews'].mean():.2f}"",
                    ""len"": str(int(result[""lens""].mean())),
                    ""n/ep"": str(int(result[""n/ep""])),
                    ""n/st"": str(int(result[""n/st""])),
                }
                if writer and env_step % log_interval == 0:
                    writer.add_scalar(
                        ""train/rew"", result['rews'].mean(), global_step=env_step)
                    writer.add_scalar(
                        ""train/len"", result['lens'].mean(), global_step=env_step)
                if test_in_train and stop_fn and stop_fn(result[""rews""].mean()):
                    test_result = test_episode(
                        policy, test_collector, test_fn,
                        epoch, episode_per_test, writer, env_step)
                    if stop_fn(test_result[""rews""].mean()):
                        if save_fn:
                            save_fn(policy)
                        t.set_postfix(**data)
                        return gather_info(
                            start_time, train_collector, test_collector,
                            test_result[""rews""].mean(), test_result[""rews""].std())
                    else:
                        policy.train()
                losses = policy.update(
                    0, train_collector.buffer,
                    batch_size=batch_size, repeat=repeat_per_collect)
                train_collector.reset_buffer()
                step = max([1] + [
                    len(v) for v in losses.values() if isinstance(v, list)])
                gradient_step += step
                for k in losses.keys():
                    stat[k].add(losses[k])
                    data[k] = f""{stat[k].get():.6f}""
                    if writer and gradient_step % log_interval == 0:
                        writer.add_scalar(
                            k, stat[k].get(), global_step=gradient_step)
                t.set_postfix(**data)
            if t.n <= t.total:
                t.update()
        # test
        test_result = test_episode(policy, test_collector, test_fn, epoch,
                              episode_per_test, writer, env_step)
        if best_epoch == -1 or best_reward < result[""rews""].mean():
            best_reward, best_reward_std = result[""rews""].mean(), result[""rews""].std()
            best_epoch = epoch
            if save_fn:
                save_fn(policy)
        if verbose:
            print(f""Epoch #{epoch}: test_reward: {result['rews'].mean():.6f} ± ""
                  f""{result['rews'].std():.6f}, best_reward: {best_reward:.6f} ± ""
                  f""{best_reward_std:.6f} in #{best_epoch}"")
        if stop_fn and stop_fn(best_reward):
            break
    return gather_info(start_time, train_collector, test_collector,
                       best_reward, best_reward_std)
","def onpolicy_trainer(
    policy: BasePolicy,
    train_collector: Collector,
    test_collector: Collector,
    max_epoch: int,
    step_per_epoch: int,
    step_per_collect: int,
    repeat_per_collect: int,
    episode_per_test: int,
    batch_size: int,
    train_fn: Optional[Callable[[int, int], None]] = None,
    test_fn: Optional[Callable[[int, Optional[int]], None]] = None,
    stop_fn: Optional[Callable[[float], bool]] = None,
    save_fn: Optional[Callable[[BasePolicy], None]] = None,
    reward_metric: Optional[Callable[[np.ndarray], np.ndarray]] = None,
    writer: Optional[SummaryWriter] = None,
    log_interval: int = 1,
    verbose: bool = True,
    test_in_train: bool = True,
    collect_method: str = ""episode"",
) -> Dict[str, Union[float, str]]:
    """"""A wrapper for on-policy trainer procedure.

    The ""step"" in trainer means a policy network update.

    :param policy: an instance of the :class:`~tianshou.policy.BasePolicy` class.
    :param train_collector: the collector used for training.
    :type train_collector: :class:`~tianshou.data.Collector`
    :param test_collector: the collector used for testing.
    :type test_collector: :class:`~tianshou.data.Collector`
    :param int max_epoch: the maximum number of epochs for training. The
        training process might be finished before reaching the ``max_epoch``.
    :param int step_per_epoch: the number of environment frames collected per epoch.
    :param int step_per_collect: the number of episodes the collector would
        collect before the network update in ""episode"" collect mode (default),
        or the number of frames the collector would collect in ""step"" collect
        mode. In other words, collect some episodes (or frames) and do one
        policy network update.
    :param int repeat_per_collect: the number of repeat time for policy
        learning, for example, set it to 2 means the policy needs to learn each
        given batch data twice.
    :param episode_per_test: the number of episodes for one policy evaluation.
    :type episode_per_test: int or list of ints
    :param int batch_size: the batch size of sample data, which is going to
        feed in the policy network.
    :param function train_fn: a hook called at the beginning of training in
        each epoch. It can be used to perform custom additional operations,
        with the signature ``f(num_epoch: int, step_idx: int) -> None``.
    :param function test_fn: a hook called at the beginning of testing in each
        epoch. It can be used to perform custom additional operations, with the
        signature ``f(num_epoch: int, step_idx: int) -> None``.
    :param function save_fn: a hook called when the undiscounted average mean
        reward in evaluation phase gets better, with the signature ``f(policy:
        BasePolicy) -> None``.
    :param function stop_fn: a function with signature ``f(mean_rewards: float)
        -> bool``, which receives the average undiscounted returns of the testing
        result and returns a boolean indicating whether the goal has been reached.
    :param function reward_metric: a function with signature ``f(rewards: np.ndarray
        with shape (num_episode, agent_num)) -> np.ndarray with shape (num_episode,)``,
        used in multi-agent RL. We need to return a single scalar for each episode's
        result to monitor training in the multi-agent RL setting. This function
        specifies what is the desired metric, e.g., the reward of agent 1 or the
        average reward over all agents.
    :param torch.utils.tensorboard.SummaryWriter writer: a TensorBoard
        SummaryWriter; if None is given, it will not write logs to TensorBoard.
    :param int log_interval: the log interval of the writer.
    :param bool verbose: whether to print the information.
    :param bool test_in_train: whether to test in the training phase.
    :param string collect_method: specifies collect mode. Can be either ""episode""
        or ""step"".

    :return: See :func:`~tianshou.trainer.gather_info`.
    """"""
    env_step, gradient_step = 0, 0
    best_epoch, best_reward, best_reward_std = -1, -1.0, 0.0
    stat: Dict[str, MovAvg] = defaultdict(MovAvg)
    start_time = time.time()
    train_collector.reset_stat()
    test_collector.reset_stat()
    test_in_train = test_in_train and train_collector.policy == policy
    test_episode(policy, test_collector, test_fn, 0, episode_per_test,
                 writer, env_step)
    for epoch in range(1, 1 + max_epoch):
        # train
        policy.train()
        with tqdm.tqdm(
            total=step_per_epoch, desc=f""Epoch #{epoch}"", **tqdm_config
        ) as t:
            while t.n < t.total:
                if train_fn:
                    train_fn(epoch, env_step)
                result = train_collector.collect(**{""n_"" + collect_method: step_per_collect})
                if reward_metric:
                    result[""rews""] = reward_metric(result[""rews""])
                env_step += int(result[""n/st""])
                t.update(result[""n/st""])
                data = {
                    ""env_step"": str(env_step),
                    ""rew"": f""{result['rews'].mean():.2f}"",
                    ""len"": str(int(result[""lens""].mean())),
                    ""n/ep"": str(int(result[""n/ep""])),
                    ""n/st"": str(int(result[""n/st""])),
                }
                if writer and env_step % log_interval == 0:
                    writer.add_scalar(
                        ""train/rew"", result['rews'].mean(), global_step=env_step)
                    writer.add_scalar(
                        ""train/len"", result['lens'].mean(), global_step=env_step)
                if test_in_train and stop_fn and stop_fn(result[""rews""].mean()):
                    test_result = test_episode(
                        policy, test_collector, test_fn,
                        epoch, episode_per_test, writer, env_step)
                    if stop_fn(test_result[""rews""].mean()):
                        if save_fn:
                            save_fn(policy)
                        t.set_postfix(**data)
                        return gather_info(
                            start_time, train_collector, test_collector,
                            test_result[""rews""].mean(), test_result[""rews""].std())
                    else:
                        policy.train()
                losses = policy.update(
                    0, train_collector.buffer,
                    batch_size=batch_size, repeat=repeat_per_collect)
                train_collector.reset_buffer()
                step = max([1] + [
                    len(v) for v in losses.values() if isinstance(v, list)])
                gradient_step += step
                for k in losses.keys():
                    stat[k].add(losses[k])
                    data[k] = f""{stat[k].get():.6f}""
                    if writer and gradient_step % log_interval == 0:
                        writer.add_scalar(
                            k, stat[k].get(), global_step=gradient_step)
                t.set_postfix(**data)
            if t.n <= t.total:
                t.update()
        # test
        test_result = test_episode(policy, test_collector, test_fn, epoch,
                                   episode_per_test, writer, env_step)
        if best_epoch == -1 or best_reward < test_result[""rews""].mean():
            best_reward = test_result[""rews""].mean()
            best_reward_std = test_result[""rews""].std()
            best_epoch = epoch
            if save_fn:
                save_fn(policy)
        if verbose:
            print(f""Epoch #{epoch}: test_reward: {result['rews'].mean():.6f} ± ""
                  f""{result['rews'].std():.6f}, best_reward: {best_reward:.6f} ± ""
                  f""{best_reward_std:.6f} in #{best_epoch}"")
        if stop_fn and stop_fn(best_reward):
            break
    return gather_info(start_time, train_collector, test_collector,
                       best_reward, best_reward_std)
"
27463,"def wait_for_legacy_usage(callback):
    url = METADATA_URL + 'instance/legacy-endpoint-access/'
    last_etag = '0'
    counts = {'0.1': 0, 'v1beta1': 0}
    while True:
        r = requests.get(
            url,
            params={
                'last_etag': last_etag,
                'recursive': True,
                'wait_for_change': True
            },
            headers=METADATA_HEADERS)
        if r.status_code == 503:  # Metadata server unavailable
            print('Metadata server unavailable. Sleeping for 1 second.')
            time.sleep(1)
            continue
        if r.status_code == 404:  # Feature not yet supported
            print('Legacy endpoint access not yet supported. Sleeping for 1 hour.')
            time.sleep(3600)
            continue
        r.raise_for_status()

        last_etag = r.headers['etag']
        access_info = json.loads(r.text)
        if access_info != counts:
            diff = {
                ver: access_info[ver] - counts[ver] for ver in counts
            }
            counts = access_info
            callback(diff)

","def wait_for_legacy_usage(callback):
    url = '{}/instance/legacy-endpoint-access/'.format(METADATA_URL)
    last_etag = '0'
    counts = {'0.1': 0, 'v1beta1': 0}
    while True:
        r = requests.get(
            url,
            params={
                'last_etag': last_etag,
                'recursive': True,
                'wait_for_change': True
            },
            headers=METADATA_HEADERS)
        if r.status_code == 503:  # Metadata server unavailable
            print('Metadata server unavailable. Sleeping for 1 second.')
            time.sleep(1)
            continue
        if r.status_code == 404:  # Feature not yet supported
            print('Legacy endpoint access not yet supported. Sleeping for 1 hour.')
            time.sleep(3600)
            continue
        r.raise_for_status()

        last_etag = r.headers['etag']
        access_info = json.loads(r.text)
        if access_info != counts:
            diff = {
                ver: access_info[ver] - counts[ver] for ver in counts
            }
            counts = access_info
            callback(diff)

"
30290,"def http_request(method, url_suffix, params=None, data=None, headers=None, files=None):
    """"""
        A wrapper for requests lib to send our requests and handle requests and responses better.
    """"""
    res = requests.request(
        method,
        BASE_URL + url_suffix,
        verify=USE_SSL,
        params=params,
        data=data,
        headers=headers,
        files=files,
    )
    # Handle error responses gracefully
    if res.status_code in {401}:
        return_error(""Got unauthorized from the server. Check the credentials."")
    elif res.status_code in {404}:
        command = demisto.command()
        if command == 'threatstream-get-model-description' \
                or command == 'threatstream-get-indicators-by-model' \
                or command == 'threatstream-get-analysis-status' \
                or command == 'threatstream-analysis-report':
            # in order to prevent raising en error in case model/indicator/report was not found
            return {}
        else:
            return_error(""The resource not found. Check the endpoint."")
    elif res.status_code not in {200, 201, 202}:
        return_error(f""Error in API call to ThreatStream {res.status_code} - {res.text}"")

    return res.json()

","def http_request(method, url_suffix, params=None, data=None, headers=None, files=None):
    """"""
        A wrapper for requests lib to send our requests and handle requests and responses better.
    """"""
    res = requests.request(
        method,
        BASE_URL + url_suffix,
        verify=USE_SSL,
        params=params,
        data=data,
        headers=headers,
        files=files,
    )
    # Handle error responses gracefully
    if res.status_code in {401}:
        return_error(""Got unauthorized from the server. Check the credentials."")
    elif res.status_code in {404}:
        command = demisto.command()
        if command == 'threatstream-get-model-description' \
                or command == 'threatstream-get-indicators-by-model' \
                or command == 'threatstream-get-analysis-status' \
                or command == 'threatstream-analysis-report':
            # in order to prevent raising an error in case the model/indicator/report was not found
            return {}
        else:
            return_error(""Resource not found. Check the endpoint."")
    elif res.status_code not in {200, 201, 202}:
        return_error(f""Error in API call to ThreatStream {res.status_code} - {res.text}"")

    return res.json()

"
43933,"def _hermite_coulomb(t, u, v, n, p, dr):
    """"""Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion
    integrals.

    These integrals are computed recursively starting from the Boys function
    [`Helgaker (1995) p817 `_]:


    .. math::

        R_{000}^n = (-2p)^n F_n(pR_{CP}^2),

    where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two
    Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the
    center of the composite Gaussian centered at :math:`P` and the electrostatic potential at
    :math:`C`. The following recursive equations are used to compute the evaluate the higher order
    Hermite integrals

    .. math::

        R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1}

        R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1}

        R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1}

    where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`.

    Args:
        t (integer): order of Hermite derivative in x
        u (integer): order of Hermite derivative in y
        v (float): order of Hermite derivative in z
        n (integer): order of the Boys function
        p (float): sum of the Gaussian exponents
        dr (array[float]): distance between the center of the composite Gaussian and the nucleus

    Returns:
        array[float]: value of the Hermite integral
    """"""
    x, y, z = dr[0], dr[1], dr[2]
    T = p * (dr ** 2).sum(axis=0)
    r = 0

    if t == u == v == 0:
        f = []
        for term in T.flatten():
            f.append(_boys(n, term))
        return ((-2 * p) ** n) * anp.array(f).reshape(T.shape)

    if t == u == 0:
        if v > 1:
            r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr)
        r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr)
        return r

    if t == 0:
        if u > 1:
            r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr)
        r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr)
        return r

    if t > 1:
        r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr)
    r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr)
    return r

","def _hermite_coulomb(t, u, v, n, p, dr):
    """"""Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion
    integrals.

    These integrals are computed recursively starting from the Boys function
    [`Helgaker (1995) p817 `_]:


    .. math::

        R_{000}^n = (-2p)^n F_n(pR_{CP}^2),

    where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two
    Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the
    center of the composite Gaussian centered at :math:`P` and the electrostatic potential at
    :math:`C`. The following recursive equations are used to evaluate the higher order
    Hermite integrals

    .. math::

        R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1},

        R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1}

        R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1}

    where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`.

    Args:
        t (integer): order of Hermite derivative in x
        u (integer): order of Hermite derivative in y
        v (integer): order of Hermite derivative in z
        n (integer): order of the Boys function
        p (float): sum of the Gaussian exponents
        dr (array[float]): distance between the center of the composite Gaussian and the nucleus

    Returns:
        array[float]: value of the Hermite integral
    """"""
    x, y, z = dr[0], dr[1], dr[2]
    T = p * (dr ** 2).sum(axis=0)
    r = 0

    if t == u == v == 0:
        f = []
        for term in T.flatten():
            f.append(_boys(n, term))
        return ((-2 * p) ** n) * anp.array(f).reshape(T.shape)

    if t == u == 0:
        if v > 1:
            r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr)
        r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr)
        return r

    if t == 0:
        if u > 1:
            r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr)
        r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr)
        return r

    if t > 1:
        r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr)
    r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr)
    return r

"
56255,"def apply_color_map(depth_map):
    depth_map *= 255
    depth_map = depth_map.astype(np.uint8)
    depth_map = cv2.applyColorMap(depth_map, cv2.COLORMAP_INFERNO)
    return depth_map

","def apply_color_map(depth_map):
    depth_map = (depth_map * 255.0).astype(np.uint8)
    return cv2.applyColorMap(depth_map, cv2.COLORMAP_INFERNO)

"
32407,"def generate_dbotscore(response: Dict) -> List:
    """"""Creates CommandResult object based on the contents of 'response' argument
        and provides DBotScore objects.

    Parameters
    ----------
    response : dict
        Object returned by ANYRUN API call in 'get_report' function.

    Returns
    -------
    List
        A list of CommandResults objects.
    """"""
    data = response.get('data', {})
    analysis = data.get('analysis', {})
    main_object = analysis.get('content', {}).get('mainObject', {})
    submission_type = main_object.get('type')
    submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
    threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
    reputation_map = {
        ""shared"": Common.DBotScore.NONE,
        ""unknown"": Common.DBotScore.NONE,
        ""whitelisted"": Common.DBotScore.GOOD,
        ""malicious"": Common.DBotScore.BAD,
        ""suspicious"": Common.DBotScore.SUSPICIOUS
    }
    returned_data = []
    main_entity = None
    main_entity_type = None

    # Add the hash or URL first
    if submission_type == 'hash':
        hashes = main_object.get('hashes', {})
        info = main_object.get('info', {})
        file_type = info.get('file')
        exif = info.get('exif', {})
        main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
        main_entity_type = FeedIndicatorType.File
        dbot_score = Common.DBotScore(
            indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
            indicator_type=DBotScoreType.FILE,
            integration_name='ANYRUN',
            score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
        )
        returned_data.append(CommandResults(
            indicator=Common.File(
                dbot_score=dbot_score,
                md5=hashes.get('md5'),
                sha1=hashes.get('sha1'),
                sha256=hashes.get('sha256'),
                file_type=file_type,
                associated_file_names=exif.get('OriginalFileName')
            )
        ))

    else:
        main_entity = main_object.get('url')
        main_entity_type = FeedIndicatorType.URL
        url_outputs = {
            'Data': main_object.get('url')
        }
        dbot_score = Common.DBotScore(
            indicator=main_object.get('url'),
            indicator_type=DBotScoreType.URL,
            integration_name='ANYRUN',
            score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
        )
        if dbot_score.score >= 2:
            url_outputs['Malicious'] = {
                'Vendor': 'ANYRUN',
                'Description': threat_text
            }
        returned_data.append(CommandResults(
            outputs_prefix='URL',
            outputs_key_field=['Data'],
            outputs=url_outputs,
            indicator=Common.URL(
                url=main_object.get('url'),
                dbot_score=dbot_score,
            )
        ))

    # Check if network information is available in the report
    if 'network' in data:
        network_data = data.get('network')

        # Then add all the network-related indicators - 'connections'
        if 'connections' in network_data:
            connections = network_data.get('connections')
            for current_connection in connections:
                reputation = current_connection.get('Reputation')
                if reputation in reputation_map.keys():
                    current_dbot_score = Common.DBotScore(
                        indicator=current_connection.get('IP'),
                        indicator_type=DBotScoreType.IP,
                        integration_name='ANYRUN',
                        score=reputation_map[reputation]
                    )
                    relationships = [EntityRelationship(
                        name=EntityRelationship.Relationships.COMMUNICATED_WITH,
                        entity_a=main_entity,
                        entity_a_type=main_entity_type,
                        entity_b=current_connection.get('IP'),
                        entity_b_type=FeedIndicatorType.IP,
                        brand=""ANYRUN""
                    )]
                    ip_indicator = Common.IP(
                        ip=current_connection.get('IP'),
                        asn=current_connection.get('ASN'),
                        port=current_connection.get('Port'),
                        geo_country=current_connection.get('Country'),
                        dbot_score=current_dbot_score,
                        relationships=relationships
                    )
                    if current_connection.get('IP') not in [
                        x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
                    ]:
                        returned_data.append(CommandResults(
                            readable_output=tableToMarkdown(
                                f""{current_connection.get('IP')}"",
                                [{
                                    ""Description"": f""This IP was observed after detonation of {main_entity} in ANYRUN""
                                }]
                            ),
                            indicator=ip_indicator,
                            relationships=relationships
                        ))

        # Then add all the network-related indicators - 'dnsRequests'
        if 'dnsRequests' in network_data:
            for current_dnsRequests in network_data.get('dnsRequests'):
                reputation = current_dnsRequests.get('Reputation')
                if reputation in reputation_map.keys():
                    current_dbot_score = Common.DBotScore(
                        indicator=current_dnsRequests.get('Domain'),
                        indicator_type=DBotScoreType.DOMAIN,
                        integration_name='ANYRUN',
                        score=reputation_map[reputation]
                    )
                    relationships = [EntityRelationship(
                        name=EntityRelationship.Relationships.COMMUNICATED_WITH,
                        entity_a=main_entity,
                        entity_a_type=main_entity_type,
                        entity_b=current_dnsRequests.get('Domain'),
                        entity_b_type=FeedIndicatorType.Domain,
                        brand=""ANYRUN""
                    )]
                    if ""IP"" in current_dnsRequests:
                        for ip in current_dnsRequests.get('IP', []):
                            relationships.append(
                                EntityRelationship(
                                    name=EntityRelationship.Relationships.RESOLVES_TO,
                                    entity_a=current_dnsRequests.get('Domain'),
                                    entity_a_type=FeedIndicatorType.Domain,
                                    entity_b=ip,
                                    entity_b_type=FeedIndicatorType.IP
                                )
                            )
                            domain_ip_dbot_score = Common.DBotScore(
                                indicator=ip,
                                indicator_type=DBotScoreType.IP,
                                integration_name=""ANYRUN"",
                                score=Common.DBotScore.NONE
                            )
                            domain_ip_indicator = Common.IP(
                                ip=ip,
                                dbot_score=domain_ip_dbot_score
                            )
                            returned_data.append(CommandResults(
                                indicator=domain_ip_indicator,
                                readable_output=tableToMarkdown(
                                    f""{ip}"",
                                    [{
                                        ""Description"": f""This IP was resovled from {current_dnsRequests.get('Domain')}""
                                    }]
                                )
                            ))
                    domain_indicator = Common.Domain(
                        domain=current_dnsRequests.get('Domain'),
                        dbot_score=current_dbot_score,
                        relationships=relationships
                    )
                    if current_dnsRequests.get('Domain') not in [
                        x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
                    ]:
                        returned_data.append(CommandResults(
                            readable_output=tableToMarkdown(
                                f""{current_dnsRequests.get('Domain')}"",
                                [{
                                    ""Description"": f""This domain was observed after detonation of {main_entity} in ANYRUN""
                                }]
                            ),
                            indicator=domain_indicator,
                            relationships=relationships
                        ))

        # Then add all the network-related indicators - 'httpRequests'
        if 'httpRequests' in network_data:
            for current_httpRequests in network_data.get('httpRequests'):
                reputation = current_httpRequests['Reputation']
                if reputation in reputation_map.keys():
                    current_dbot_score = Common.DBotScore(
                        indicator=current_httpRequests.get('URL'),
                        indicator_type=DBotScoreType.URL,
                        integration_name='ANYRUN',
                        score=reputation_map[reputation]
                    )
                    relationships = [EntityRelationship(
                        name=EntityRelationship.Relationships.COMMUNICATED_WITH,
                        entity_a=main_entity,
                        entity_a_type=main_entity_type,
                        entity_b=current_httpRequests.get('URL'),
                        entity_b_type=FeedIndicatorType.URL,
                        brand=""ANYRUN""
                    )]
                    url_indicator = Common.URL(
                        url=current_httpRequests.get('URL'),
                        geo_country=current_httpRequests.get('Country'),
                        port=current_httpRequests.get('Port'),
                        dbot_score=current_dbot_score,
                        relationships=relationships
                    )
                    if current_httpRequests.get('URL') not in [
                        x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
                    ]:
                        returned_data.append(CommandResults(
                            readable_output=tableToMarkdown(
                                f""{current_httpRequests.get('URL')}"",
                                [{
                                    ""Description"": f""This URL was observed after detonation of {main_entity} in ANYRUN""
                                }]
                            ),
                            indicator=url_indicator,
                            relationships=relationships
                        ))

    if 'mitre' in data:
        mitre_data = data.get('mitre')
        for item in mitre_data:
            relationships = [EntityRelationship(
                name=EntityRelationship.Relationships.RELATED_TO,
                entity_a=main_entity,
                entity_a_type=main_entity_type,
                entity_b=item.get('name'),
                entity_b_type='Attack Pattern'
            )]
            attack_indicator = Common.AttackPattern(
                stix_id=None,
                value=item.get('name'),
                mitre_id=item.get('id')
            )
            returned_data.append(CommandResults(
                readable_output=tableToMarkdown(
                    f""{item.get('name')}"",
                    [{
                        ""Description"": f""This Attack Pattern was observed after detonation of {main_entity} in ANYRUN""
                    }]
                ),
                indicator=attack_indicator,
                relationships=relationships
            ))

    return returned_data

","def generate_dbotscore(response: Dict) -> List:
    """"""Creates CommandResult object based on the contents of 'response' argument
        and provides DBotScore objects.

    Parameters
    ----------
    response : dict
        Object returned by ANYRUN API call in 'get_report' function.

    Returns
    -------
    List
        A list of CommandResults objects.
    """"""
    data = response.get('data', {})
    analysis = data.get('analysis', {})
    main_object = analysis.get('content', {}).get('mainObject', {})
    submission_type = main_object.get('type')
    submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
    threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
    reputation_map = {
        ""shared"": Common.DBotScore.NONE,
        ""unknown"": Common.DBotScore.NONE,
        ""whitelisted"": Common.DBotScore.GOOD,
        ""malicious"": Common.DBotScore.BAD,
        ""suspicious"": Common.DBotScore.SUSPICIOUS
    }
    returned_data = []
    main_entity = None
    main_entity_type = None

    # Add the hash or URL first
    if submission_type == 'hash':
        hashes = main_object.get('hashes', {})
        info = main_object.get('info', {})
        file_type = info.get('file')
        exif = info.get('exif', {})
        main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
        main_entity_type = FeedIndicatorType.File
        dbot_score = Common.DBotScore(
            indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
            indicator_type=DBotScoreType.FILE,
            integration_name='ANYRUN',
            score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
        )
        returned_data.append(CommandResults(
            indicator=Common.File(
                dbot_score=dbot_score,
                md5=hashes.get('md5'),
                sha1=hashes.get('sha1'),
                sha256=hashes.get('sha256'),
                file_type=file_type,
                associated_file_names=exif.get('OriginalFileName')
            )
        ))

    else:
        main_entity = main_object.get('url')
        main_entity_type = FeedIndicatorType.URL
        url_outputs = {
            'Data': main_object.get('url')
        }
        dbot_score = Common.DBotScore(
            indicator=main_entity,
            indicator_type=DBotScoreType.URL,
            integration_name='ANYRUN',
            score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
        )
        if dbot_score.score >= 2:
            url_outputs['Malicious'] = {
                'Vendor': 'ANYRUN',
                'Description': threat_text
            }
        returned_data.append(CommandResults(
            outputs_prefix='URL',
            outputs_key_field=['Data'],
            outputs=url_outputs,
            indicator=Common.URL(
                url=main_object.get('url'),
                dbot_score=dbot_score,
            )
        ))

    # Check if network information is available in the report
    if 'network' in data:
        network_data = data.get('network')

        # Then add all the network-related indicators - 'connections'
        if 'connections' in network_data:
            connections = network_data.get('connections')
            for current_connection in connections:
                reputation = current_connection.get('Reputation')
                if reputation in reputation_map.keys():
                    current_dbot_score = Common.DBotScore(
                        indicator=current_connection.get('IP'),
                        indicator_type=DBotScoreType.IP,
                        integration_name='ANYRUN',
                        score=reputation_map[reputation]
                    )
                    relationships = [EntityRelationship(
                        name=EntityRelationship.Relationships.COMMUNICATED_WITH,
                        entity_a=main_entity,
                        entity_a_type=main_entity_type,
                        entity_b=current_connection.get('IP'),
                        entity_b_type=FeedIndicatorType.IP,
                        brand=""ANYRUN""
                    )]
                    ip_indicator = Common.IP(
                        ip=current_connection.get('IP'),
                        asn=current_connection.get('ASN'),
                        port=current_connection.get('Port'),
                        geo_country=current_connection.get('Country'),
                        dbot_score=current_dbot_score,
                        relationships=relationships
                    )
                    if current_connection.get('IP') not in [
                        x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
                    ]:
                        returned_data.append(CommandResults(
                            readable_output=tableToMarkdown(
                                f""{current_connection.get('IP')}"",
                                [{
                                    ""Description"": f""This IP was observed after detonation of {main_entity} in ANYRUN""
                                }]
                            ),
                            indicator=ip_indicator,
                            relationships=relationships
                        ))

        # Then add all the network-related indicators - 'dnsRequests'
        if 'dnsRequests' in network_data:
            for current_dnsRequests in network_data.get('dnsRequests'):
                reputation = current_dnsRequests.get('Reputation')
                if reputation in reputation_map.keys():
                    current_dbot_score = Common.DBotScore(
                        indicator=current_dnsRequests.get('Domain'),
                        indicator_type=DBotScoreType.DOMAIN,
                        integration_name='ANYRUN',
                        score=reputation_map[reputation]
                    )
                    relationships = [EntityRelationship(
                        name=EntityRelationship.Relationships.COMMUNICATED_WITH,
                        entity_a=main_entity,
                        entity_a_type=main_entity_type,
                        entity_b=current_dnsRequests.get('Domain'),
                        entity_b_type=FeedIndicatorType.Domain,
                        brand=""ANYRUN""
                    )]
                    if ""IP"" in current_dnsRequests:
                        for ip in current_dnsRequests.get('IP', []):
                            relationships.append(
                                EntityRelationship(
                                    name=EntityRelationship.Relationships.RESOLVES_TO,
                                    entity_a=current_dnsRequests.get('Domain'),
                                    entity_a_type=FeedIndicatorType.Domain,
                                    entity_b=ip,
                                    entity_b_type=FeedIndicatorType.IP
                                )
                            )
                            domain_ip_dbot_score = Common.DBotScore(
                                indicator=ip,
                                indicator_type=DBotScoreType.IP,
                                integration_name=""ANYRUN"",
                                score=Common.DBotScore.NONE
                            )
                            domain_ip_indicator = Common.IP(
                                ip=ip,
                                dbot_score=domain_ip_dbot_score
                            )
                            returned_data.append(CommandResults(
                                indicator=domain_ip_indicator,
                                readable_output=tableToMarkdown(
                                    f""{ip}"",
                                    [{
                                        ""Description"": f""This IP was resovled from {current_dnsRequests.get('Domain')}""
                                    }]
                                )
                            ))
                    domain_indicator = Common.Domain(
                        domain=current_dnsRequests.get('Domain'),
                        dbot_score=current_dbot_score,
                        relationships=relationships
                    )
                    if current_dnsRequests.get('Domain') not in [
                        x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
                    ]:
                        returned_data.append(CommandResults(
                            readable_output=tableToMarkdown(
                                f""{current_dnsRequests.get('Domain')}"",
                                [{
                                    ""Description"": f""This domain was observed after detonation of {main_entity} in ANYRUN""
                                }]
                            ),
                            indicator=domain_indicator,
                            relationships=relationships
                        ))

        # Then add all the network-related indicators - 'httpRequests'
        if 'httpRequests' in network_data:
            for current_httpRequests in network_data.get('httpRequests'):
                reputation = current_httpRequests['Reputation']
                if reputation in reputation_map.keys():
                    current_dbot_score = Common.DBotScore(
                        indicator=current_httpRequests.get('URL'),
                        indicator_type=DBotScoreType.URL,
                        integration_name='ANYRUN',
                        score=reputation_map[reputation]
                    )
                    relationships = [EntityRelationship(
                        name=EntityRelationship.Relationships.COMMUNICATED_WITH,
                        entity_a=main_entity,
                        entity_a_type=main_entity_type,
                        entity_b=current_httpRequests.get('URL'),
                        entity_b_type=FeedIndicatorType.URL,
                        brand=""ANYRUN""
                    )]
                    url_indicator = Common.URL(
                        url=current_httpRequests.get('URL'),
                        geo_country=current_httpRequests.get('Country'),
                        port=current_httpRequests.get('Port'),
                        dbot_score=current_dbot_score,
                        relationships=relationships
                    )
                    if current_httpRequests.get('URL') not in [
                        x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
                    ]:
                        returned_data.append(CommandResults(
                            readable_output=tableToMarkdown(
                                f""{current_httpRequests.get('URL')}"",
                                [{
                                    ""Description"": f""This URL was observed after detonation of {main_entity} in ANYRUN""
                                }]
                            ),
                            indicator=url_indicator,
                            relationships=relationships
                        ))

    if 'mitre' in data:
        mitre_data = data.get('mitre')
        for item in mitre_data:
            relationships = [EntityRelationship(
                name=EntityRelationship.Relationships.RELATED_TO,
                entity_a=main_entity,
                entity_a_type=main_entity_type,
                entity_b=item.get('name'),
                entity_b_type='Attack Pattern'
            )]
            attack_indicator = Common.AttackPattern(
                stix_id=None,
                value=item.get('name'),
                mitre_id=item.get('id')
            )
            returned_data.append(CommandResults(
                readable_output=tableToMarkdown(
                    f""{item.get('name')}"",
                    [{
                        ""Description"": f""This Attack Pattern was observed after detonation of {main_entity} in ANYRUN""
                    }]
                ),
                indicator=attack_indicator,
                relationships=relationships
            ))

    return returned_data

"
25754,"def _get_coordinates(network, layouter=None):
    if layouter is not None or network.buses[['x', 'y']].isin([0]).all().all():
        coordinates = autogenerate_coordinates(network, layouter=layouter)
        return coordinates[""x""], coordinates[""y""]
    else:
        return network.buses[""x""], network.buses[""y""]

","def _get_coordinates(network, layouter=None):
    if layouter is not None or network.buses[['x', 'y']].isin([np.nan, 0]).all().all():
        coordinates = autogenerate_coordinates(network, layouter=layouter)
        return coordinates[""x""], coordinates[""y""]
    else:
        return network.buses[""x""], network.buses[""y""]

"
5656,"def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    """"""
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : array_like or object
        The independent variable where the data is measured.
        Should usually be an M-length sequence or an (k,M)-shaped array for
        functions with k predictors, but can actually be any object.
    ydata : array_like
        The dependent data, a length M array - nominally ``f(xdata, ...)``.
    p0 : array_like, optional
        Initial guess for the parameters (length N). If None, then the
        initial values will all be 1 (if the number of parameters for the
        function can be determined using introspection, otherwise a
        ValueError is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

            - A 1-D `sigma` should contain values of standard deviations of
              errors in `ydata`. In this case, the optimized function is
              ``chisq = sum((r / sigma) ** 2)``.

            - A 2-D `sigma` should contain the covariance matrix of
              errors in `ydata`. In this case, the optimized function is
              ``chisq = r.T @ inv(sigma) @ r``.

              .. versionadded:: 0.19

        None (default) is equivalent of 1-D `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated parameter
        covariance `pcov` reflects these absolute values.

        If False (default), only the relative magnitudes of the `sigma` values matter.
        The returned parameter covariance matrix `pcov` is based on scaling
        `sigma` by a constant factor. This constant is set by demanding that the
        reduced `chisq` for the optimal parameters `popt` when using the
        *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
        match the sample variance of the residuals after the fit. Default is False
        Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans of infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        Each element of the tuple must be either an array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters). Use ``np.inf`` with an
        appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables, use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2-D array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have a full rank, then
        'lm' method returns a matrix filled with ``np.inf``, on the other hand
        'trf'  and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
        the covariance matrix.

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.

    RuntimeError
        if the least-squares minimization fails.

    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> np.random.seed(1729)
    >>> y_noise = 0.2 * np.random.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([ 2.55423706,  1.35190947,  0.47450618])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([ 2.43708906,  1.        ,  0.35015434])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()

    """"""
    if p0 is None:
        # determine number of parameters by inspecting the function
        sig = _getfullargspec(f)
        args = sig.args
        if len(args) < 2:
            raise ValueError(""Unable to determine number of fit parameters."")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError(""Method 'lm' only works for unconstrained problems. ""
                         ""Use 'trf' or 'dogbox' instead."")

    # optimization may produce garbage for float32 inputs, cast them to float64

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata, float)
    else:
        ydata = np.asarray(ydata, float)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata, float)
        else:
            xdata = np.asarray(xdata, float)

    if ydata.size == 0:
        raise ValueError(""`ydata` must not be empty!"")

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-D, sigma are errors, define transform = 1/sigma
        if sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-D, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError:
                raise ValueError(""`sigma` must be positive definite."")
        else:
            raise ValueError(""`sigma` has incorrect shape."")
    else:
        transform = None

    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if 'args' in kwargs:
        # The specification for the model function `f` does not support
        # additional arguments. Refer to the `curve_fit` docstring for
        # acceptable call signatures of `f`.
        raise ValueError(""'args' is not a supported keyword argument."")

    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        ysize = len(infodict['fvec'])
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError(""Optimal parameters not found: "" + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError(""Optimal parameters not found: "" + res.message)

        ysize = len(res.fun)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        return_full = False

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        if ysize > p0.size:
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov

","def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    """"""
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : array_like or object
        The independent variable where the data is measured.
        Should usually be an M-length sequence or a (k,M)-shaped array for
        functions with k predictors, but can actually be any object.
    ydata : array_like
        The dependent data, a length M array - nominally ``f(xdata, ...)``.
    p0 : array_like, optional
        Initial guess for the parameters (length N). If None, then the
        initial values will all be 1 (if the number of parameters for the
        function can be determined using introspection, otherwise a
        ValueError is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

            - A 1-D `sigma` should contain values of standard deviations of
              errors in `ydata`. In this case, the optimized function is
              ``chisq = sum((r / sigma) ** 2)``.

            - A 2-D `sigma` should contain the covariance matrix of
              errors in `ydata`. In this case, the optimized function is
              ``chisq = r.T @ inv(sigma) @ r``.

              .. versionadded:: 0.19

        None (default) is equivalent to 1-D `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated parameter
        covariance `pcov` reflects these absolute values.

        If False (default), only the relative magnitudes of the `sigma` values matter.
        The returned parameter covariance matrix `pcov` is based on scaling
        `sigma` by a constant factor. This constant is set by demanding that the
        reduced `chisq` for the optimal parameters `popt` when using the
        *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
        match the sample variance of the residuals after the fit. Default is False.
        Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain NaNs or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        Each element of the tuple must be either an array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters). Use ``np.inf`` with an
        appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables; use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2-D array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``; the 'trf'
        and 'dogbox' methods, on the other hand, use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.

    RuntimeError
        if the least-squares minimization fails.

    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> np.random.seed(1729)
    >>> y_noise = 0.2 * np.random.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([ 2.55423706,  1.35190947,  0.47450618])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([ 2.43708906,  1.        ,  0.35015434])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()

    """"""
    if p0 is None:
        # determine number of parameters by inspecting the function
        sig = _getfullargspec(f)
        args = sig.args
        if len(args) < 2:
            raise ValueError(""Unable to determine number of fit parameters."")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError(""Method 'lm' only works for unconstrained problems. ""
                         ""Use 'trf' or 'dogbox' instead."")

    # optimization may produce garbage for float32 inputs, cast them to float64

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata, float)
    else:
        ydata = np.asarray(ydata, float)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata, float)
        else:
            xdata = np.asarray(xdata, float)

    if ydata.size == 0:
        raise ValueError(""`ydata` must not be empty!"")

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-D, sigma are errors, define transform = 1/sigma
        if sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-D, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError:
                raise ValueError(""`sigma` must be positive definite."")
        else:
            raise ValueError(""`sigma` has incorrect shape."")
    else:
        transform = None

    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if 'args' in kwargs:
        # The specification for the model function `f` does not support
        # additional arguments. Refer to the `curve_fit` docstring for
        # acceptable call signatures of `f`.
        raise ValueError(""'args' is not a supported keyword argument."")

    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        ysize = len(infodict['fvec'])
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError(""Optimal parameters not found: "" + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError(""Optimal parameters not found: "" + res.message)

        ysize = len(res.fun)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        return_full = False

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        if ysize > p0.size:
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov

"
45431,"def test_setattr_axes():
    # Test that setting .index or .columns does not warn
    df = pd.DataFrame(np.random.randn(2, 2))
    with pytest.warns(None):
        df.index = [""foo"", ""bar""]
        df.columns = [9, 10]

    # Check that ensure_index was called
    pandas.testing.assert_index_equal(df.index, pandas.Index([""foo"", ""bar""]))
    pandas.testing.assert_index_equal(df.columns, pandas.Index([9, 10]))
","def test_setattr_axes():
    # Test that setting .index or .columns does not warn
    df = pd.DataFrame([[1, 2], [3, 4]])
    with pytest.warns(None):
        df.index = [""foo"", ""bar""]
        df.columns = [9, 10]

    # Check that ensure_index was called
    pandas.testing.assert_index_equal(df.index, pandas.Index([""foo"", ""bar""]))
    pandas.testing.assert_index_equal(df.columns, pandas.Index([9, 10]))
"
5806,"def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    """"""
    Compute a double integral.

    Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
    and ``y = gfun(x)..hfun(x)``.

    Parameters
    ----------
    func : callable
        A Python function or method of at least two variables: y must be the
        first argument and x the second argument.
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : callable or float
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result
        or a float indicating a constant boundary curve.
    hfun : callable or float
        The upper boundary curve in y (same requirements as `gfun`).
    args : sequence, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8. ``dblquad`` tries to obtain
        an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
        where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
        to ``hfun(x)``, and ``result`` is the numerical approximation.
        See `epsrel` below.
    epsrel : float, optional
        Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
        If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
        and ``50 * (machine epsilon)``. See `epsabs` above.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : single integral
    tplquad : triple integral
    nquad : N-dimensional integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simpson : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    Examples
    --------
    Compute the double integral of ``x * y**2`` over the box
    ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
    That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 dy dx`.

    >>> from scipy import integrate
    >>> f = lambda y, x: x*y**2
    >>> integrate.dblquad(f, 0, 2, 0, 1)
        (0.6666666666666667, 7.401486830834377e-15)

    Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 dy dx`.

    >>> from numpy import pi, cos, sin
    >>> f = lambda y, x: 1
    >>> integrate.dblquad(f, 0, pi/4, sin, cos)
        (0.41421356237309503, 1.1083280054755938e-14)

    Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=x}_{y=2-x} a x y dy dx` for :math:`a=1, 3`.

    >>> f = lambda y, x, a: a*x*y
    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
        (0.33333333333333337, 5.551115123125783e-15)
    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
        (0.9999999999999999, 1.6653345369377348e-14)

    """"""

    def temp_ranges(*args):
        return [gfun(args[0]) if callable(gfun) else gfun,
                hfun(args[0]) if callable(hfun) else hfun]

    return nquad(func, [temp_ranges, [a, b]], args=args,
            opts={""epsabs"": epsabs, ""epsrel"": epsrel})

","def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    """"""
    Compute a double integral.

    Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
    and ``y = gfun(x)..hfun(x)``.

    Parameters
    ----------
    func : callable
        A Python function or method of at least two variables: y must be the
        first argument and x the second argument.
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : callable or float
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result
        or a float indicating a constant boundary curve.
    hfun : callable or float
        The upper boundary curve in y (same requirements as `gfun`).
    args : sequence, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8. ``dblquad`` tries to obtain
        an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
        where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
        to ``hfun(x)``, and ``result`` is the numerical approximation.
        See `epsrel` below.
    epsrel : float, optional
        Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
        If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
        and ``50 * (machine epsilon)``. See `epsabs` above.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : single integral
    tplquad : triple integral
    nquad : N-dimensional integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simpson : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    Examples
    --------
    Compute the double integral of ``x * y**2`` over the box
    ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
    That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 \,dy \,dx`.

    >>> from scipy import integrate
    >>> f = lambda y, x: x*y**2
    >>> integrate.dblquad(f, 0, 2, 0, 1)
        (0.6666666666666667, 7.401486830834377e-15)

    Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 dy dx`.

    >>> from numpy import pi, cos, sin
    >>> f = lambda y, x: 1
    >>> integrate.dblquad(f, 0, pi/4, sin, cos)
        (0.41421356237309503, 1.1083280054755938e-14)

    Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=x}_{y=2-x} a x y dy dx` for :math:`a=1, 3`.

    >>> f = lambda y, x, a: a*x*y
    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
        (0.33333333333333337, 5.551115123125783e-15)
    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
        (0.9999999999999999, 1.6653345369377348e-14)

    """"""

    def temp_ranges(*args):
        return [gfun(args[0]) if callable(gfun) else gfun,
                hfun(args[0]) if callable(hfun) else hfun]

    return nquad(func, [temp_ranges, [a, b]], args=args,
            opts={""epsabs"": epsabs, ""epsrel"": epsrel})

"
41167,"def _determine_needs_init_layer(init_state: value.ProductState):
    """"""Helper function to determine whether we need the initial layer of
    rotations used in `measure_grouped_settings`.
    """"""
    for _, st in init_state:
        if st != value.KET_ZERO:
            return True

    return False

","def _needs_init_layer(init_state: value.ProductState):
    """"""Helper function to determine whether we need the initial layer of
    rotations used in `measure_grouped_settings`.
    """"""
    for _, st in init_state:
        if st != value.KET_ZERO:
            return True

    return False

"
7164,"def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
         tol=1e-4, prefilter=False):
    """"""Coarse to fine TV-L1 optical flow estimator.

    TV-L1 is a popular algorithm for optical flow estimation introduced
    by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.

    Parameters
    ----------
    I0 : ~numpy.ndarray
        The first gray scale image of the sequence.
    I1 : ~numpy.ndarray
        The second gray scale image of the sequence.
    dt : float
        Time step of the numerical scheme. Convergence is proved for
        values dt < 0.125, but it can be larger for faster
        convergence.
    lambda_ : float
        Attachment parameter. The smaller this parameter is,
        the smoother is the solutions.
    tau : float
        Tightness parameter. It should have a small value in order to
        maintain attachment and regularization parts in
        correspondence.
    nwarp : int
        Number of times I1 is warped.
    niter : int
        Number of fixed point iterations.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : tuple[~numpy.ndarray]
        The estimated optical flow.

    References
    ----------
    .. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
       duality based approach for realtime TV-L 1 optical flow. In Joint
       pattern recognition symposium (pp. 214-223). Springer, Berlin,
       Heidelberg.
    .. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
       D. (2009). An improved algorithm for TV-L 1 optical flow. In
       Statistical and geometrical approaches to visual motion analysis
       (pp. 23-45). Springer, Berlin, Heidelberg.
    .. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
       G. (2013). TV-L1 optical flow estimation. Image Processing On
       Line, 2013, 137-150.

    Examples
    --------
    >>> from skimage.color import rgb2gray
    >>> from skimage.data import stereo_motorcycle
    >>> from skimage.registration import tvl1
    >>> I0, I1, disp = stereo_motorcycle()
    >>> # --- Convert the images to gray level: color is not supported.
    >>> I0 = rgb2gray(I0)
    >>> I1 = rgb2gray(I1)
    >>> flow = tvl1(I1, I0)

    """"""

    solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau,
                     nwarp=nwarp, niter=niter, tol=tol,
                     prefilter=prefilter)

    return coarse_to_fine(I0, I1, solver)
","def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
         tol=1e-4, prefilter=False):
    """"""Coarse to fine TV-L1 optical flow estimator.

    TV-L1 is a popular algorithm for optical flow estimation introduced
    by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.

    Parameters
    ----------
    I0 : ~numpy.ndarray
        The first gray scale image of the sequence.
    I1 : ~numpy.ndarray
        The second gray scale image of the sequence.
    dt : float
        Time step of the numerical scheme. Convergence is proved for
        values dt < 0.125, but it can be larger for faster
        convergence.
    lambda_ : float
        Attachment parameter. The smaller this parameter is,
        the smoother the found solution will be.
    tau : float
        Tightness parameter. It should have a small value in order to
        maintain attachment and regularization parts in
        correspondence.
    nwarp : int
        Number of times I1 is warped.
    niter : int
        Number of fixed point iterations.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : tuple[~numpy.ndarray]
        The estimated optical flow.

    References
    ----------
    .. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
       duality based approach for realtime TV-L 1 optical flow. In Joint
       pattern recognition symposium (pp. 214-223). Springer, Berlin,
       Heidelberg.
    .. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
       D. (2009). An improved algorithm for TV-L 1 optical flow. In
       Statistical and geometrical approaches to visual motion analysis
       (pp. 23-45). Springer, Berlin, Heidelberg.
    .. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
       G. (2013). TV-L1 optical flow estimation. Image Processing On
       Line, 2013, 137-150.

    Examples
    --------
    >>> from skimage.color import rgb2gray
    >>> from skimage.data import stereo_motorcycle
    >>> from skimage.registration import tvl1
    >>> I0, I1, disp = stereo_motorcycle()
    >>> # --- Convert the images to gray level: color is not supported.
    >>> I0 = rgb2gray(I0)
    >>> I1 = rgb2gray(I1)
    >>> flow = tvl1(I1, I0)

    """"""

    solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau,
                     nwarp=nwarp, niter=niter, tol=tol,
                     prefilter=prefilter)

    return coarse_to_fine(I0, I1, solver)
"
1524,"def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                              max_iter=100, tol=1e-4, verbose=0,
                              solver='lbfgs', coef=None,
                              class_weight=None, dual=False, penalty='l2',
                              intercept_scaling=1., multi_class='auto',
                              random_state=None, check_input=True,
                              max_squared_sum=None, sample_weight=None,
                              l1_ratio=None):
    """"""Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.

    Read more in the :ref:`User Guide `.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The ""balanced"" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.

    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a ""synthetic"" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.

        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.

        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.22
            Default changed from 'ovr' to 'auto' in 0.22.

    random_state : int, RandomState instance or None, optional (default=None)
        Used when ``solver`` == 'sag' or 'liblinear' to shuffle the data.
        See :term:`Glossary ` for details.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape (n_samples,), optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multiclass='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).

    Cs : ndarray
        Grid of Cs used for cross-validation.

    n_iter : array, shape (n_cs,)
        Actual number of iterations for each C in Cs.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.

    .. versionchanged:: 0.19
        The ""copy"" parameter was removed.
    """"""
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    solver = _check_solver(solver, penalty, dual)

    # Preprocessing.
    if check_input:
        X = check_array(X, accept_sparse='csr', dtype=np.float64,
                        accept_large_sparse=solver != 'liblinear')
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape

    classes = np.unique(y)
    random_state = check_random_state(random_state)

    multi_class = _check_multi_class(multi_class, solver, len(classes))
    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    sample_weight = _check_sample_weight(sample_weight, X,
                                         dtype=X.dtype)

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is ""balanced"", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()
    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        class_weight_ = compute_class_weight(class_weight, classes, y)
        sample_weight *= class_weight_[le.fit_transform(y)]

    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=X.dtype)
        y_bin[~mask] = -1.
        # for compute_class_weight

        if class_weight == ""balanced"":
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]

    else:
        if solver not in ['sag', 'saga']:
            lbin = LabelBinarizer()
            Y_multi = lbin.fit_transform(y)
            if Y_multi.shape[1] == 1:
                Y_multi = np.hstack([1 - Y_multi, Y_multi])
        else:
            # SAG multinomial solver needs LabelEncoder, not LabelBinarizer
            le = LabelEncoder()
            Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)

        w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
                      order='F', dtype=X.dtype)

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_classes = classes.size
            if n_classes == 2:
                n_classes = 1

            if (coef.shape[0] != n_classes or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))

            if n_classes == 1:
                w0[0, :coef.shape[1]] = -coef
                w0[1, :coef.shape[1]] = coef
            else:
                w0[:, :coef.shape[1]] = coef

    if multi_class == 'multinomial':
        # scipy.optimize.minimize and newton-cg accept only
        # ravelled parameters.
        if solver in ['lbfgs', 'newton-cg']:
            w0 = w0.ravel()
        target = Y_multi
        if solver == 'lbfgs':
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_grad_hess
        warm_start_sag = {'coef': w0.T}
    else:
        target = y_bin
        if solver == 'lbfgs':
            func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            func = _logistic_loss
            grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
            hess = _logistic_grad_hess
        warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}

    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            iprint = [-1, 50, 1, 100, 101][
                np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
            opt_res = optimize.minimize(
                func, w0, method=""L-BFGS-B"", jac=True,
                args=(X, target, 1. / C, sample_weight),
                options={""iprint"": iprint, ""gtol"": tol, ""maxiter"": max_iter}
            )
            n_iter_i = _check_optimize_result(
                solver, opt_res, max_iter,
                extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
            w0, loss = opt_res.x, opt_res.fun
        elif solver == 'newton-cg':
            args = (X, target, 1. / C, sample_weight)
            w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
                                      maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, None,
                penalty, dual, verbose, max_iter, tol, random_state,
                sample_weight=sample_weight)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()

        elif solver in ['sag', 'saga']:
            if multi_class == 'multinomial':
                target = target.astype(X.dtype, copy=False)
                loss = 'multinomial'
            else:
                loss = 'log'
            # alpha is for L2-norm, beta is for L1-norm
            if penalty == 'l1':
                alpha = 0.
                beta = 1. / C
            elif penalty == 'l2':
                alpha = 1. / C
                beta = 0.
            else:  # Elastic-Net penalty
                alpha = (1. / C) * (1 - l1_ratio)
                beta = (1. / C) * l1_ratio

            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, loss, alpha,
                beta, max_iter, tol,
                verbose, random_state, False, max_squared_sum, warm_start_sag,
                is_saga=(solver == 'saga'))

        else:
            raise ValueError(""solver must be one of {'liblinear', 'lbfgs', ""
                             ""'newton-cg', 'sag'}, got '%s' instead"" % solver)

        if multi_class == 'multinomial':
            n_classes = max(2, classes.size)
            multi_w0 = np.reshape(w0, (n_classes, -1))
            if n_classes == 2:
                multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0.copy())
        else:
            coefs.append(w0.copy())

        n_iter[i] = n_iter_i

    return np.array(coefs), np.array(Cs), n_iter

","def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                              max_iter=100, tol=1e-4, verbose=0,
                              solver='lbfgs', coef=None,
                              class_weight=None, dual=False, penalty='l2',
                              intercept_scaling=1., multi_class='auto',
                              random_state=None, check_input=True,
                              max_squared_sum=None, sample_weight=None,
                              l1_ratio=None):
    """"""Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.

    Read more in the :ref:`User Guide `.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The ""balanced"" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.

    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a ""synthetic"" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.

        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.

        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.22
            Default changed from 'ovr' to 'auto' in 0.22.

    random_state : int, RandomState instance or None, default=None
        Used when ``solver`` == 'sag' or 'liblinear' to shuffle the data.
        See :term:`Glossary ` for details.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape (n_samples,), optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multiclass='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).

    Cs : ndarray
        Grid of Cs used for cross-validation.

    n_iter : array, shape (n_cs,)
        Actual number of iterations for each C in Cs.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.

    .. versionchanged:: 0.19
        The ""copy"" parameter was removed.
    """"""
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    solver = _check_solver(solver, penalty, dual)

    # Preprocessing.
    if check_input:
        X = check_array(X, accept_sparse='csr', dtype=np.float64,
                        accept_large_sparse=solver != 'liblinear')
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape

    classes = np.unique(y)
    random_state = check_random_state(random_state)

    multi_class = _check_multi_class(multi_class, solver, len(classes))
    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    sample_weight = _check_sample_weight(sample_weight, X,
                                         dtype=X.dtype)

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is ""balanced"", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()
    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        class_weight_ = compute_class_weight(class_weight, classes, y)
        sample_weight *= class_weight_[le.fit_transform(y)]

    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=X.dtype)
        y_bin[~mask] = -1.
        # for compute_class_weight

        if class_weight == ""balanced"":
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]

    else:
        if solver not in ['sag', 'saga']:
            lbin = LabelBinarizer()
            Y_multi = lbin.fit_transform(y)
            if Y_multi.shape[1] == 1:
                Y_multi = np.hstack([1 - Y_multi, Y_multi])
        else:
            # SAG multinomial solver needs LabelEncoder, not LabelBinarizer
            le = LabelEncoder()
            Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)

        w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
                      order='F', dtype=X.dtype)

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_classes = classes.size
            if n_classes == 2:
                n_classes = 1

            if (coef.shape[0] != n_classes or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))

            if n_classes == 1:
                w0[0, :coef.shape[1]] = -coef
                w0[1, :coef.shape[1]] = coef
            else:
                w0[:, :coef.shape[1]] = coef

    if multi_class == 'multinomial':
        # scipy.optimize.minimize and newton-cg accept only
        # ravelled parameters.
        if solver in ['lbfgs', 'newton-cg']:
            w0 = w0.ravel()
        target = Y_multi
        if solver == 'lbfgs':
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_grad_hess
        warm_start_sag = {'coef': w0.T}
    else:
        target = y_bin
        if solver == 'lbfgs':
            func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            func = _logistic_loss
            grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
            hess = _logistic_grad_hess
        warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}

    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            iprint = [-1, 50, 1, 100, 101][
                np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
            opt_res = optimize.minimize(
                func, w0, method=""L-BFGS-B"", jac=True,
                args=(X, target, 1. / C, sample_weight),
                options={""iprint"": iprint, ""gtol"": tol, ""maxiter"": max_iter}
            )
            n_iter_i = _check_optimize_result(
                solver, opt_res, max_iter,
                extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
            w0, loss = opt_res.x, opt_res.fun
        elif solver == 'newton-cg':
            args = (X, target, 1. / C, sample_weight)
            w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
                                      maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, None,
                penalty, dual, verbose, max_iter, tol, random_state,
                sample_weight=sample_weight)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()

        elif solver in ['sag', 'saga']:
            if multi_class == 'multinomial':
                target = target.astype(X.dtype, copy=False)
                loss = 'multinomial'
            else:
                loss = 'log'
            # alpha is for L2-norm, beta is for L1-norm
            if penalty == 'l1':
                alpha = 0.
                beta = 1. / C
            elif penalty == 'l2':
                alpha = 1. / C
                beta = 0.
            else:  # Elastic-Net penalty
                alpha = (1. / C) * (1 - l1_ratio)
                beta = (1. / C) * l1_ratio

            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, loss, alpha,
                beta, max_iter, tol,
                verbose, random_state, False, max_squared_sum, warm_start_sag,
                is_saga=(solver == 'saga'))

        else:
            raise ValueError(""solver must be one of {'liblinear', 'lbfgs', ""
                             ""'newton-cg', 'sag'}, got '%s' instead"" % solver)

        if multi_class == 'multinomial':
            n_classes = max(2, classes.size)
            multi_w0 = np.reshape(w0, (n_classes, -1))
            if n_classes == 2:
                multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0.copy())
        else:
            coefs.append(w0.copy())

        n_iter[i] = n_iter_i

    return np.array(coefs), np.array(Cs), n_iter

"
47921,"def build_argparser():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help',
                      default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument(""-m_encoder"", help=""Required. Path to an .xml file with a trained encoder part of the model"",
                      required=True, type=str)
    args.add_argument(""-m_decoder"", help=""Required. Path to an .xml file with a trained decoder part of the model"",
                      required=True, type=str)
    args.add_argument(""--interactive"", help=""Optional. Enables interactive mode. In this mode images are read from the web-camera."",
                      action='store_true', default=False)
    args.add_argument(""-i"", ""--input"", help=""Optional. Path to a folder with images or path to an image files"",
                      required=False, type=str)
    args.add_argument(""-o"", ""--output_file"",
                      help=""Optional. Path to file where to store output. If not mentioned, result will be stored""
                      ""in the console."",
                      type=str)
    args.add_argument(""--vocab_path"", help=""Required. Path to vocab file to construct meaningful phrase"",
                      type=str, required=True)
    args.add_argument(""--max_formula_len"",
                      help=""Optional. Defines maximum length of the formula (number of tokens to decode)"",
                      default=""128"", type=int)
    args.add_argument(""--conf_thresh"", help=""Optional. Probability threshold to trat model prediction as meaningful"",
                      default=CONFIDENCE_THRESH, type=float)
    args.add_argument(""-d"", ""--device"",
                      help=""Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is ""
                           ""acceptable. Sample will look for a suitable plugin for device specified. Default value is CPU"",
                      default=""CPU"", type=str)
    args.add_argument(""--camera_device"", default=0, type=int,
                      help='Optional. Device id of the web-camera. Change it only if you have more than one camera')
    args.add_argument(""--resolution"", default=DEFAULT_RESOLUTION, type=int, nargs=2,
                      help=f'Optional. Resolution of the demo application window. Default: {DEFAULT_RESOLUTION}')
    args.add_argument('--preprocessing_type', choices=PREPROCESSING.keys(),
                      help=""Optional. Type of the preprocessing"", default='crop')
    args.add_argument('-pc', '--perf_counts',
                      action='store_true', default=False)
    args.add_argument('--imgs_layer', help='Optional. Encoder input name for images. See README for details.',
                      default='imgs')
    args.add_argument('--row_enc_out_layer', help='Optional. Encoder output key for row_enc_out. See README for details.',
                      default='row_enc_out')
    args.add_argument('--hidden_layer', help='Optional. Encoder output key for hidden. See README for details.',
                      default='hidden')
    args.add_argument('--context_layer', help='Optional. Encoder output key for context. See README for details.',
                      default='context')
    args.add_argument('--init_0_layer', help='Optional. Encoder output key for init_0. See README for details.',
                      default='init_0')
    args.add_argument('--dec_st_c_layer', help='Optional. Decoder input key for dec_st_c. See README for details.',
                      default='dec_st_c')
    args.add_argument('--dec_st_h_layer', help='Optional. Decoder input key for dec_st_h. See README for details.',
                      default='dec_st_h')
    args.add_argument('--dec_st_c_t_layer', help='Optional. Decoder output key for dec_st_c_t. See README for details.',
                      default='dec_st_c_t')
    args.add_argument('--dec_st_h_t_layer', help='Optional. Decoder output key for dec_st_h_t. See README for details.',
                      default='dec_st_h_t')
    args.add_argument('--output_layer', help='Optional. Decoder output key for output. See README for details.',
                      default='output')
    args.add_argument('--output_prev_layer', help='Optional. Decoder input key for output_prev. See README for details.',
                      default='output_prev')
    args.add_argument('--logit_layer', help='Optional. Decoder output key for logit. See README for details.',
                      default='logit')
    args.add_argument('--tgt_layer', help='Optional. Decoder input key for tgt. See README for details.',
                      default='tgt')
    return parser

","def build_argparser():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help',
                      default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument(""-m_encoder"", help=""Required. Path to an .xml file with a trained encoder part of the model"",
                      required=True, type=str)
    args.add_argument(""-m_decoder"", help=""Required. Path to an .xml file with a trained decoder part of the model"",
                      required=True, type=str)
    args.add_argument(""--interactive"", help=""Optional. Enables interactive mode. In this mode images are read from the web-camera."",
                      action='store_true', default=False)
    args.add_argument(""-i"", ""--input"", help=""Optional. Path to a folder with images or path to an image files"",
                      required=False, type=str)
    args.add_argument(""-o"", ""--output_file"",
                      help=""Optional. Path to file where to store output. If not mentioned, result will be stored""
                      ""in the console."",
                      type=str)
    args.add_argument(""--vocab_path"", help=""Required. Path to vocab file to construct meaningful phrase"",
                      type=str, required=True)
    args.add_argument(""--max_formula_len"",
                      help=""Optional. Defines maximum length of the formula (number of tokens to decode)"",
                      default=""128"", type=int)
    args.add_argument(""--conf_thresh"", help=""Optional. Probability threshold to trat model prediction as meaningful"",
                      default=CONFIDENCE_THRESH, type=float)
    args.add_argument(""-d"", ""--device"",
                      help=""Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is ""
                           ""acceptable. Sample will look for a suitable plugin for device specified. Default value is CPU"",
                      default=""CPU"", type=str)
    args.add_argument(""--camera_device"", default=0, type=int,
                      help='Optional. Device id of the web-camera. Change it only if you have more than one camera')
    args.add_argument(""--resolution"", default=(1280, 720), type=int, nargs=2,
                      help=f'Optional. Resolution of the demo application window. Default: {DEFAULT_RESOLUTION}')
    args.add_argument('--preprocessing_type', choices=PREPROCESSING.keys(),
                      help=""Optional. Type of the preprocessing"", default='crop')
    args.add_argument('-pc', '--perf_counts',
                      action='store_true', default=False)
    args.add_argument('--imgs_layer', help='Optional. Encoder input name for images. See README for details.',
                      default='imgs')
    args.add_argument('--row_enc_out_layer', help='Optional. Encoder output key for row_enc_out. See README for details.',
                      default='row_enc_out')
    args.add_argument('--hidden_layer', help='Optional. Encoder output key for hidden. See README for details.',
                      default='hidden')
    args.add_argument('--context_layer', help='Optional. Encoder output key for context. See README for details.',
                      default='context')
    args.add_argument('--init_0_layer', help='Optional. Encoder output key for init_0. See README for details.',
                      default='init_0')
    args.add_argument('--dec_st_c_layer', help='Optional. Decoder input key for dec_st_c. See README for details.',
                      default='dec_st_c')
    args.add_argument('--dec_st_h_layer', help='Optional. Decoder input key for dec_st_h. See README for details.',
                      default='dec_st_h')
    args.add_argument('--dec_st_c_t_layer', help='Optional. Decoder output key for dec_st_c_t. See README for details.',
                      default='dec_st_c_t')
    args.add_argument('--dec_st_h_t_layer', help='Optional. Decoder output key for dec_st_h_t. See README for details.',
                      default='dec_st_h_t')
    args.add_argument('--output_layer', help='Optional. Decoder output key for output. See README for details.',
                      default='output')
    args.add_argument('--output_prev_layer', help='Optional. Decoder input key for output_prev. See README for details.',
                      default='output_prev')
    args.add_argument('--logit_layer', help='Optional. Decoder output key for logit. See README for details.',
                      default='logit')
    args.add_argument('--tgt_layer', help='Optional. Decoder input key for tgt. See README for details.',
                      default='tgt')
    return parser

"
30548,"def filter_misp_response(misp_response: List[dict]) -> List[dict]:
    """""" Filter MISP response -
        Remove from Attributes and Objects the following keys if they exist:
         1. MetaData keys - Galaxy, Tags (The keys both determine classification by publisher)
         2. Selected keys - Removing all not selected keys in response.
         3. Related events - Remove related events if not chosen.

    Args:
        misp_response(list): valid response of MISP client using PyMisp

    Returns:
        list: Filtered response
    """"""
    metadata_state = demisto.params().get('metadata')
    related_events_state = demisto.params().get('related_events')
    selected_keys: List[str] = [item.lower() for item in demisto.params().get('context_select')]

    if not related_events_state or not metadata_state or selected_keys:
        for i in range(len(misp_response)):
            # Filter Misp Event object
            misp_response[i]['Event']: dict = filter_obj(misp_response[i]['Event'], is_att=False)
            # Filter Misp Attribute object
            misp_response[i]['Event']['Attribute'] = [filter_obj(att) for att in
                                                      misp_response[i]['Event']['Attribute']]
            # Filter Misp Object object
            for j in range(len(misp_response[i]['Event']['Object'])):
                misp_response[i]['Event']['Object'][j]['Attribute'] = [filter_obj(att) for att in
                                                                       misp_response[i]['Event']['Object'][j]['Attribute']]

    return misp_response

","def filter_misp_response(misp_response: List[dict]) -> List[dict]:
    """""" Filter MISP response -
        Remove from Attributes and Objects the following keys if they exist:
         1. MetaData keys - Galaxy, Tags (The keys both determine classification by publisher)
         2. Selected keys - Removing all not selected keys in response.
         3. Related events - Remove related events if not chosen.

    Args:
        misp_response(list): valid response of MISP client using PyMisp

    Returns:
        list: Filtered response
    """"""
    metadata_state = demisto.params().get('metadata')
    related_events_state = demisto.params().get('related_events')
    selected_keys: List[str] = [item.lower() for item in demisto.params().get('context_select')]

    if not related_events_state or not metadata_state or selected_keys:
        for res in misp_response:
            # Filter Misp Event object
            res['Event']: dict = filter_obj(res['Event'], is_att=False)
            # Filter Misp Attribute object
            res['Event']['Attribute'] = [filter_obj(att) for att in
                                         res['Event']['Attribute']]
            # Filter Misp Object object
            for j in range(len(res['Event']['Object'])):
                res['Event']['Object'][j]['Attribute'] = [filter_obj(att) for att in
                                                          res['Event']['Object'][j]['Attribute']]

    return misp_response

"
27265,"def test_date_time_literals():
    ibis.date(2022, 2, 4)
    ibis.time(16, 20, 00)
    ibis.timestamp(2022, 2, 4, 16, 20, 00)
","def test_date_time_literals():
    ibis.date(2022, 2, 4)
    ibis.time(16, 20, 00)
    assert ibis.timestamp(2022, 2, 4, 16, 20, 00).type() == dt.timestamp
"
26427,"def resetCache():
    if _resource['iconic'] is not None:
        _resource['iconic'].icon_cache = {}

","def reset_cache():
    if _resource['iconic'] is not None:
        _resource['iconic'].icon_cache = {}

"
20175,"def graph_response(graph, format):
    '''
    Return a proper flask response for a RDF resource given an expected format.
    '''
    fmt = guess_format(format)
    if not fmt:
        abort(404)
    headers = {
        'Content-Type': RDF_MIME_TYPES[fmt]
    }
    kwargs = {}
    if fmt == 'json-ld':
        kwargs['context'] = context
    if isinstance(graph, RdfResource):
        graph = graph.graph
    return escape_xml_illegal_chars(graph.serialize(format=fmt, **kwargs).decode(""utf-8"")), 200, headers
","def graph_response(graph, format):
    '''
    Return a proper flask response for a RDF resource given an expected format.
    '''
    fmt = guess_format(format)
    if not fmt:
        abort(404)
    headers = {
        'Content-Type': RDF_MIME_TYPES[fmt]
    }
    kwargs = {}
    if fmt == 'json-ld':
        kwargs['context'] = context
    if isinstance(graph, RdfResource):
        graph = graph.graph
    return escape_xml_illegal_chars(graph.serialize(format=fmt, **kwargs).decode('utf-8')), 200, headers
"
22236,"def build_dependency_manager(app_config_dict=None, resolution_config_dict=None, conf_file=None, default_tool_dependency_dir=None):
    """"""Build a DependencyManager object from app and/or resolution config.

    If app_config_dict is specified, it should be application configuration information
    and configuration options are generally named to identify the context is dependency
    management (e.g. conda_prefix not prefix or use_cached_dependency_manager not cache).
    resolution_config_dict if specified is assumed to be the to_dict() version of a
    DependencyManager and should only contain dependency configuration options.
    """"""

    if app_config_dict is None:
        app_config_dict = {}
    else:
        app_config_dict = app_config_dict.copy()

    tool_dependency_dir = app_config_dict.get(""tool_dependency_dir"", default_tool_dependency_dir)
    if tool_dependency_dir and tool_dependency_dir.lower() == ""none"":
        app_config_dict[""tool_dependency_dir""] = None

    if resolution_config_dict is None and ""dependency_resolution"" in app_config_dict:
        resolution_config_dict = app_config_dict[""dependency_resolution""]

    if resolution_config_dict:
        # Convert local to_dict options into global ones.

        # to_dict() has ""cache"", ""cache_dir"", ""use"", ""default_base_path"", ""resolvers"", ""precache""
        app_config_props_from_resolution_config = {
            ""use_tool_dependencies"": resolution_config_dict.get(""use"", None),
            ""tool_dependency_dir"": resolution_config_dict.get(""default_base_path"", None),
            ""dependency_resolvers"": resolution_config_dict.get(""resolvers"", None),
            ""tool_dependency_cache_dir"": resolution_config_dict.get(""cache_dir"", None),
            ""precache_dependencies"": resolution_config_dict.get(""precache"", None),
            ""use_cached_dependency_manager"": resolution_config_dict.get(""cache"", None),
        }

        for key, value in app_config_props_from_resolution_config.items():
            if value is not None:
                app_config_dict[key] = value

    use_tool_dependencies = app_config_dict.get(""use_tool_dependencies"", None)
    # if we haven't set an explicit True or False, try to infer from config...
    if use_tool_dependencies is None:
        use_tool_dependencies = app_config_dict.get(""tool_dependency_dir"", default_tool_dependency_dir) is not None or \
            app_config_dict.get(""dependency_resolvers"") or \
            (conf_file and os.path.exists(conf_file))

    if use_tool_dependencies:
        dependency_manager_kwds = {
            ""default_base_path"": app_config_dict.get(""tool_dependency_dir"", default_tool_dependency_dir),
            ""conf_file"": conf_file,
            ""app_config"": app_config_dict,
        }
        if string_as_bool(app_config_dict.get(""use_cached_dependency_manager"")):
            dependency_manager = CachedDependencyManager(**dependency_manager_kwds)
        else:
            dependency_manager = DependencyManager(**dependency_manager_kwds)
    else:
        dependency_manager = NullDependencyManager()

    return dependency_manager

","def build_dependency_manager(app_config_dict=None, resolution_config_dict=None, conf_file=None, default_tool_dependency_dir=None):
    """"""Build a DependencyManager object from app and/or resolution config.

    If app_config_dict is specified, it should be application configuration information
    and configuration options are generally named to identify the context of dependency
    management (e.g. conda_prefix not prefix or use_cached_dependency_manager not cache).
    resolution_config_dict if specified is assumed to be the to_dict() version of a
    DependencyManager and should only contain dependency configuration options.
    """"""

    if app_config_dict is None:
        app_config_dict = {}
    else:
        app_config_dict = app_config_dict.copy()

    tool_dependency_dir = app_config_dict.get(""tool_dependency_dir"", default_tool_dependency_dir)
    if tool_dependency_dir and tool_dependency_dir.lower() == ""none"":
        app_config_dict[""tool_dependency_dir""] = None

    if resolution_config_dict is None and ""dependency_resolution"" in app_config_dict:
        resolution_config_dict = app_config_dict[""dependency_resolution""]

    if resolution_config_dict:
        # Convert local to_dict options into global ones.

        # to_dict() has ""cache"", ""cache_dir"", ""use"", ""default_base_path"", ""resolvers"", ""precache""
        app_config_props_from_resolution_config = {
            ""use_tool_dependencies"": resolution_config_dict.get(""use"", None),
            ""tool_dependency_dir"": resolution_config_dict.get(""default_base_path"", None),
            ""dependency_resolvers"": resolution_config_dict.get(""resolvers"", None),
            ""tool_dependency_cache_dir"": resolution_config_dict.get(""cache_dir"", None),
            ""precache_dependencies"": resolution_config_dict.get(""precache"", None),
            ""use_cached_dependency_manager"": resolution_config_dict.get(""cache"", None),
        }

        for key, value in app_config_props_from_resolution_config.items():
            if value is not None:
                app_config_dict[key] = value

    use_tool_dependencies = app_config_dict.get(""use_tool_dependencies"", None)
    # if we haven't set an explicit True or False, try to infer from config...
    if use_tool_dependencies is None:
        use_tool_dependencies = app_config_dict.get(""tool_dependency_dir"", default_tool_dependency_dir) is not None or \
            app_config_dict.get(""dependency_resolvers"") or \
            (conf_file and os.path.exists(conf_file))

    if use_tool_dependencies:
        dependency_manager_kwds = {
            ""default_base_path"": app_config_dict.get(""tool_dependency_dir"", default_tool_dependency_dir),
            ""conf_file"": conf_file,
            ""app_config"": app_config_dict,
        }
        if string_as_bool(app_config_dict.get(""use_cached_dependency_manager"")):
            dependency_manager = CachedDependencyManager(**dependency_manager_kwds)
        else:
            dependency_manager = DependencyManager(**dependency_manager_kwds)
    else:
        dependency_manager = NullDependencyManager()

    return dependency_manager

"
12727,"def test_from_image_build_arg_names(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            ""test/a/BUILD"": """"""docker_image(name=""image"")"""""",
            ""test/a/Dockerfile"": ""FROM upstream"",
            ""test/b/BUILD"": """"""docker_image(name=""image"")"""""",
            ""test/b/Dockerfile"": dedent(
                """"""\
                ARG BASE_IMAGE=test/a:image
                FROM ${BASE_IMAGE} AS base
                """"""
            ),
        }
    )
    addr = Address(""test/b"", target_name=""image"")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    assert info.from_image_build_arg_names == (""BASE_IMAGE"",)

","def test_from_image_build_arg_names(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            ""test/a/BUILD"": ""docker_image(name='image')"",
            ""test/a/Dockerfile"": ""FROM upstream"",
            ""test/b/BUILD"": """"""docker_image(name=""image"")"""""",
            ""test/b/Dockerfile"": dedent(
                """"""\
                ARG BASE_IMAGE=test/a:image
                FROM ${BASE_IMAGE} AS base
                """"""
            ),
        }
    )
    addr = Address(""test/b"", target_name=""image"")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    assert info.from_image_build_arg_names == (""BASE_IMAGE"",)

"
32543,"def sc_create_update_indicator_command(client: MsClient, args: Dict[str, str]) -> CommandResults:
    """"""Updates an indicator if exists, if does not exist, create new one
    Note: CIDR notation for IPs is not supported.

    Args:
        client: MsClient
        args: arguments from CortexSOAR.
           Must contain 'indicator_value', 'indicator_type', 'indicator_description', 'indicator_title', and 'action'.

    """"""
    indicator_value = args['indicator_value']
    indicator_type = args['indicator_type']
    action = args['action']
    severity = args.get('severity')
    expiration_time = get_future_time(args.get('expiration_time', '1 day'))
    indicator_description = args['indicator_description']
    indicator_title = args['indicator_title']
    indicator_application = args.get('indicator_application', '')
    recommended_actions = args.get('recommended_actions', '')
    rbac_group_names = argToList(args.get('rbac_group_names', []))
    generate_alert = args.get('generate_alert', True)

    indicator = client.create_update_indicator_security_center_api(
        indicator_value=indicator_value, expiration_date_time=expiration_time,
        description=indicator_description, severity=severity, indicator_type=indicator_type, action=action,
        indicator_title=indicator_title, indicator_application=indicator_application,
        recommended_actions=recommended_actions, rbac_group_names=rbac_group_names, generate_alert=generate_alert
    )
    if indicator:
        indicator_value = indicator.get('indicatorValue')  # type:ignore
        dbot_indicator = get_indicator_dbot_object(indicator)
        human_readable = tableToMarkdown(f'Indicator {indicator_value} was updated successfully.',
                                         indicator, headers=list(SC_INDICATORS_HEADERS), removeNull=True)
        return CommandResults(outputs=indicator, indicator=dbot_indicator,
                              readable_output=human_readable, outputs_key_field='id',
                              outputs_prefix='MicrosoftATP.Indicators')
    else:
        return CommandResults(readable_output=f'Indicator {indicator_value} was NOT updated.')

","def sc_create_update_indicator_command(client: MsClient, args: Dict[str, str]) -> CommandResults:
    """"""Updates an indicator if exists, if does not exist, create new one
    Note: CIDR notation for IPs is not supported.

    Args:
        client: MsClient
        args: arguments from CortexSOAR.
           Must contain 'indicator_value', 'indicator_type', 'indicator_description', 'indicator_title', and 'action'.

    """"""
    indicator_value = args['indicator_value']
    indicator_type = args['indicator_type']
    action = args['action']
    severity = args.get('severity')
    expiration_time = get_future_time(args.get('expiration_time', '1 day'))
    indicator_description = args['indicator_description']
    indicator_title = args['indicator_title']
    indicator_application = args.get('indicator_application', '')
    recommended_actions = args.get('recommended_actions', '')
    rbac_group_names = argToList(args.get('rbac_group_names', []))
    generate_alert = argToBoolean(args.get('generate_alert', True))

    indicator = client.create_update_indicator_security_center_api(
        indicator_value=indicator_value, expiration_date_time=expiration_time,
        description=indicator_description, severity=severity, indicator_type=indicator_type, action=action,
        indicator_title=indicator_title, indicator_application=indicator_application,
        recommended_actions=recommended_actions, rbac_group_names=rbac_group_names, generate_alert=generate_alert
    )
    if indicator:
        indicator_value = indicator.get('indicatorValue')  # type:ignore
        dbot_indicator = get_indicator_dbot_object(indicator)
        human_readable = tableToMarkdown(f'Indicator {indicator_value} was updated successfully.',
                                         indicator, headers=list(SC_INDICATORS_HEADERS), removeNull=True)
        return CommandResults(outputs=indicator, indicator=dbot_indicator,
                              readable_output=human_readable, outputs_key_field='id',
                              outputs_prefix='MicrosoftATP.Indicators')
    else:
        return CommandResults(readable_output=f'Indicator {indicator_value} was NOT updated.')

"
59617,"def load_earth_relief(resolution=""01d""):
    """"""
    Load Earth relief grids (topography and bathymetry) in various resolutions.

    The grids are downloaded to a user data directory (usually ``~/.gmt/``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    These grids can also be accessed by passing in the file name
    ``'@earth_relief_XXm'`` or ``'@earth_relief_XXs'`` to any grid
    plotting/processing function.

    Parameters
    ----------
    resolution : str
        The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for
        arc-degree, arc-minute and arc-second. It can be ``'01d'``, ``'60m'``,
        ``'30m'``, ``'10m'``, ``'05m'``, ``'02m'``, ``'01m'``, ``'30s'``
        or ``'15s'``.

    Returns
    -------
    grid : xarray.DataArray
        The Earth relief grid. Coordinates are latitude and longitude in
        degrees. Relief is in meters.

    """"""
    _is_valid_resolution(resolution)
    fname = which(""@earth_relief_{}"".format(resolution), download=""u"")
    grid = xr.open_dataarray(fname)
    # Add some metadata to the grid
    grid.name = ""elevation""
    grid.attrs[""long_name""] = ""elevation relative to the geoid""
    grid.attrs[""units""] = ""meters""
    grid.attrs[""vertical_datum""] = ""EMG96""
    grid.attrs[""horizontal_datum""] = ""WGS84""
    # Remove the actual range because it gets outdated when indexing the grid,
    # which causes problems when exporting it to netCDF for usage on the
    # command-line.
    grid.attrs.pop(""actual_range"")
    for coord in grid.coords:
        grid[coord].attrs.pop(""actual_range"")
    return grid

","def load_earth_relief(resolution=""01d""):
    """"""
    Load Earth relief grids (topography and bathymetry) in various resolutions.

    The grids are downloaded to a user data directory (usually ``~/.gmt/``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    These grids can also be accessed by passing in the file name
    ``'@earth_relief_XXm'`` or ``'@earth_relief_XXs'`` to any grid
    plotting/processing function.

    Parameters
    ----------
    resolution : str
        The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for
        arc-degree, arc-minute and arc-second. It can be ``'01d'``, ``'30m'``,
        ``'10m'``, ``'05m'``, ``'02m'``, ``'01m'``, ``'30s'`` or ``'15s'``.

    Returns
    -------
    grid : xarray.DataArray
        The Earth relief grid. Coordinates are latitude and longitude in
        degrees. Relief is in meters.

    """"""
    _is_valid_resolution(resolution)
    fname = which(""@earth_relief_{}"".format(resolution), download=""u"")
    grid = xr.open_dataarray(fname)
    # Add some metadata to the grid
    grid.name = ""elevation""
    grid.attrs[""long_name""] = ""elevation relative to the geoid""
    grid.attrs[""units""] = ""meters""
    grid.attrs[""vertical_datum""] = ""EMG96""
    grid.attrs[""horizontal_datum""] = ""WGS84""
    # Remove the actual range because it gets outdated when indexing the grid,
    # which causes problems when exporting it to netCDF for usage on the
    # command-line.
    grid.attrs.pop(""actual_range"")
    for coord in grid.coords:
        grid[coord].attrs.pop(""actual_range"")
    return grid

"
45753,"def _get_addon_name(full_name):
    # The (Odoo) module name can be in the ``odoo.addons`` namespace
    # or not. For instance, module ``sale`` can be imported as
    # ``odoo.addons.sale`` (the right way) or ``sale`` (for backward
    # compatibility).
    module_parts = full_name.split(""."")
    if len(module_parts) > 2 and module_parts[:2] == [""odoo"", ""addons""]:
        addon_name = full_name.split(""."")[2]
    else:
        addon_name = full_name.split(""."")[0]
    return addon_name

","def _get_addon_name(full_name):
    # The (Odoo) module name can be in the ``odoo.addons`` namespace
    # or not. For instance, module ``sale`` can be imported as
    # ``odoo.addons.sale`` (the right way) or ``sale`` (for backward
    # compatibility).
    module_parts = full_name.split(""."")
    if len(module_parts) > 2 and module_parts[:2] == [""odoo"", ""addons""]:
        addon_name = full_name.split(""."")[2]
    else:
        addon_name = module_parts[0]
    return addon_name

"
13410,"def test_06_delete_the_encrypted_pool_and_verify_the_system_dataset(request, pool_data):
    payload = {
        'cascade': True,
        'restart_services': True,
        'destroy': True
    }
    results = POST(f'/pool/id/{pool_data[""encrypted""][""id""]}/export/', payload)
    assert results.status_code == 200, results.text
    job_id = results.json()
    job_status = wait_on_job(job_id, 120)
    assert job_status['state'] == 'SUCCESS', str(job_status['results'])

    results = GET(""/systemdataset/"")
    assert results.status_code == 200, results.text
    assert isinstance(results.json(), dict), results.text
    assert results.json()['pool'] == 'boot-pool', results.text
    assert results.json()['basename'] == 'boot-pool/.system', results.text

","def test_06_verify_sysds_after_passphrase_encrypted_pool_is_deleted(request, pool_data):
    payload = {
        'cascade': True,
        'restart_services': True,
        'destroy': True
    }
    results = POST(f'/pool/id/{pool_data[""encrypted""][""id""]}/export/', payload)
    assert results.status_code == 200, results.text
    job_id = results.json()
    job_status = wait_on_job(job_id, 120)
    assert job_status['state'] == 'SUCCESS', str(job_status['results'])

    results = GET(""/systemdataset/"")
    assert results.status_code == 200, results.text
    assert isinstance(results.json(), dict), results.text
    assert results.json()['pool'] == 'boot-pool', results.text
    assert results.json()['basename'] == 'boot-pool/.system', results.text

"
30716,"def add_account(client, args):
    title = f'{INTEGRATION_NAME} - Add a New Account'
    raws = []
    cyberark_ec = []
    raw_response = client.add_account(user_name=args.get('user-name'), address=args.get('address'),
                                      platform_id=args.get('platform-Id'), safe_name=args.get('safe-name'),
                                      name=args.get('name'), secret=args.get('secret'),
                                      secret_type=args.get('secret-type'),
                                      platform_account_properties=args.get('platform-account-properties'),
                                      automatic_management_enabled=args.get('automatic-management-enabled'),
                                      manual_management_reason=args.get('manual-management-reason'),
                                      remote_machines=args.get('remote-machines'),
                                      access_restricted_to_remote_machines=args.get('access-restricted-'
                                                                                    'to-remote-machines'))
    if raw_response:
        raws.append(raw_response)
        cyberark_ec.append({
            'AccountName': raw_response['name'],
            'UserName': raw_response['userName'],
            'PlatformID': raw_response['platformId'],
            'SafeName': raw_response['safeName'],
            'AccountID': raw_response['id'],
            'CreatedTime': raw_response['createdTime']
        })

    if not raws:
        return f'{INTEGRATION_NAME} - Could not create the new Account'

    context_entry = {
        ""CyberArk.Accounts"": cyberark_ec
    }

    human_readable = tableToMarkdown(t=context_entry.get('CyberArk.Accounts'), name=title)
    return [human_readable, context_entry, raws]

","def add_account(client, args):
    title = f'{INTEGRATION_NAME} - Add a New Account'
    raws = []
    cyberark_ec = []
    raw_response = client.add_account(user_name=args.get('user-name'), address=args.get('address'),
                                      platform_id=args.get('platform-Id'), safe_name=args.get('safe-name'),
                                      name=args.get('name'), secret=args.get('secret'),
                                      secret_type=args.get('secret-type'),
                                      platform_account_properties=args.get('platform-account-properties'),
                                      automatic_management_enabled=args.get('automatic-management-enabled'),
                                      manual_management_reason=args.get('manual-management-reason'),
                                      remote_machines=args.get('remote-machines'),
                                      access_restricted_to_remote_machines=args.get('access-restricted-'
                                                                                    'to-remote-machines'))
    if raw_response:
        raws.append(raw_response)
        cyberark_ec.append({
            'AccountName': raw_response['name'],
            'UserName': raw_response['userName'],
            'PlatformID': raw_response['platformId'],
            'SafeName': raw_response['safeName'],
            'AccountID': raw_response['id'],
            'CreatedTime': raw_response['createdTime']
        })

    if not raws:
        return (f'{INTEGRATION_NAME} - Could not create the new Account', {}, {})

    context_entry = {
        ""CyberArk.Accounts"": cyberark_ec
    }

    human_readable = tableToMarkdown(t=context_entry.get('CyberArk.Accounts'), name=title)
    return [human_readable, context_entry, raws]

"
59892,"def l2_norm(dim: int, var: pp.ad.Ad_array) -> pp.ad.Ad_array:
    """"""L2 norm of a vector variable.

    For the example of dim=3 components and n vectors, the ordering is assumed
    to be
        [u0, v0, w0, u1, v1, w1, ..., un, vn, wn]

    Usage note:
        See module level documentation on how to wrap functions like this in ad.Function.

    Parameters
    ----------
    dim : int
        Dimension, i.e. number of vector components.
    var : pp.ad.Ad_array
        Ad operator (variable or expression) which is argument of the norm
        function.

    Returns
    -------
    pp.ad.Ad_array
        The norm of var with appropriate val and jac attributes.

    """"""

    if dim == 1:
        return pp.ad.functions.abs(var)
    resh = np.reshape(var.val, (dim, -1), order=""F"")
    vals = np.linalg.norm(resh, axis=0)
    # Avoid dividing by zero
    tol = 1e-12
    nonzero_inds = vals > tol
    jac_vals = np.zeros(resh.shape)
    jac_vals[:, nonzero_inds] = resh[:, nonzero_inds] / vals[nonzero_inds]
    jac_vals[:, ~nonzero_inds] = 1
    # Prepare for left multiplication with var.jac to yield
    # norm(var).jac = var/norm(var) * var.jac
    dim_size = var.val.size
    size = int(var.val.size / dim)
    local_inds_t = np.arange(dim_size)
    local_inds_n = np.int32(np.kron(np.arange(size), np.ones(dim)))
    norm_jac = sps.csr_matrix(
        (jac_vals.ravel(""F""), (local_inds_n, local_inds_t)),
        shape=(size, dim_size),
    )
    jac = norm_jac * var.jac
    return pp.ad.Ad_array(vals, jac)

","def l2_norm(dim: int, var: pp.ad.Ad_array) -> pp.ad.Ad_array:
    """"""L2 norm of a vector variable.

    For the example of dim=3 components and n vectors, the ordering is assumed
    to be
        [u0, v0, w0, u1, v1, w1, ..., un, vn, wn]

    Usage note:
        See module level documentation on how to wrap functions like this in ad.Function.

    Parameters
    ----------
    dim : int
        Dimension, i.e. number of vector components.
    var : pp.ad.Ad_array
        Ad operator (variable or expression) which is argument of the norm
        function.

    Returns
    -------
    pp.ad.Ad_array
        The norm of var with appropriate val and jac attributes.

    """"""

    if dim == 1:
        return pp.ad.functions.abs(var)
    resh = np.reshape(var.val, (dim, -1), order=""F"")
    vals = np.linalg.norm(resh, axis=0)
    # Avoid dividing by zero
    tol = 1e-12
    nonzero_inds = vals > tol
    jac_vals = np.zeros(resh.shape)
    jac_vals[:, nonzero_inds] = resh[:, nonzero_inds] / vals[nonzero_inds]
    jac_vals[:, ~nonzero_inds] = 1
    # Prepare for left multiplication with var.jac to yield
    # norm(var).jac = var/norm(var) * var.jac
    dim_size = var.val.size
    # Check that size of var is compatible with the given dimension, e.g. all 'cells' have the same number of values assigned
    assert dim_size % dim == 0
    size = int(dim_size / dim)
    local_inds_t = np.arange(dim_size)
    local_inds_n = np.int32(np.kron(np.arange(size), np.ones(dim)))
    norm_jac = sps.csr_matrix(
        (jac_vals.ravel(""F""), (local_inds_n, local_inds_t)),
        shape=(size, dim_size),
    )
    jac = norm_jac * var.jac
    return pp.ad.Ad_array(vals, jac)

"
30441,"def run_bandit(project_dir, py_num):
    lint_files = get_lint_files(project_dir)
    print(""========= Running bandit on: {} ==============="".format(lint_files))
    python_exe = 'python2' if py_num < 3 else 'python3'
    print_v('Using: {} to run flake8'.format(python_exe))
    sys.stdout.flush()
    subprocess.check_call([python_exe, '-m', 'bandit', '-lll', '-iii', lint_files], cwd=CONTENT_DIR)
    print(""bandit completed"")

","def run_bandit(project_dir, py_num):
    lint_files = get_lint_files(project_dir)
    print(""========= Running bandit on: {} ==============="".format(lint_files))
    python_exe = 'python2' if py_num < 3 else 'python3'
    print_v('Using: {} to run bandit'.format(python_exe))
    sys.stdout.flush()
    subprocess.check_call([python_exe, '-m', 'bandit', '-lll', '-iii', lint_files], cwd=CONTENT_DIR)
    print(""bandit completed"")

"
1075,"def bids_gen_info(bids_event_files,
    condition_column='trial_type',
    amplitude_column=None,
    time_repetition=False,
    ):
    """"""Generate subject_info structure from a list of BIDS .tsv event files.

    Parameters
    ----------

    bids_event_files : list of str
        Filenames of BIDS .tsv event files containing columns including:
        'onset', 'duration', and 'trial_type' or the `condition_column` value.
    condition_column : str
        Column of files in `bids_event_files` based on the values of which
        events will be sorted into different regressors
    amplitude_column : str
        Column of files in `bids_event_files` based on the values of which
        to apply amplitudes to events. If unspecified, all events will be
        represented with an amplitude of 1.

    Returns
    -------

    list of Bunch
    """"""
    info = []
    for bids_event_file in bids_event_files:
        with open(bids_event_file) as f:
            f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t')
            events = [{k: v for k, v in row.items()} for row in f_events]
            conditions = list(set([i[condition_column] for i in events]))
            runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
            for condition in conditions:
                selected_events = [i for i in events if i[condition_column]==condition]
                onsets = [float(i['onset']) for i in selected_events]
                durations = [float(i['duration']) for i in selected_events]
                if time_repetition:
                    decimals = math.ceil(-math.log10(time_repetition))
                    onsets = [round(i,decimals) for i in onsets]
                    durations = [round(i,decimals) for i in durations]
                if condition:
                    runinfo.conditions.append(condition)
                else:
                    runinfo.conditions.append('e0')
                runinfo.onsets.append(onsets)
                runinfo.durations.append(durations)
                try:
                    amplitudes = [float(i[amplitude_column]) for i in selected_events]
                    runinfo.amplitudes.append(amplitudes)
                except KeyError:
                    runinfo.amplitudes.append([1]*len(onsets))
            info.append(runinfo)
    return info

","def bids_gen_info(bids_event_files,
                  condition_column='trial_type',
    amplitude_column=None,
    time_repetition=False,
    ):
    """"""Generate subject_info structure from a list of BIDS .tsv event files.

    Parameters
    ----------

    bids_event_files : list of str
        Filenames of BIDS .tsv event files containing columns including:
        'onset', 'duration', and 'trial_type' or the `condition_column` value.
    condition_column : str
        Column of files in `bids_event_files` based on the values of which
        events will be sorted into different regressors
    amplitude_column : str
        Column of files in `bids_event_files` based on the values of which
        to apply amplitudes to events. If unspecified, all events will be
        represented with an amplitude of 1.

    Returns
    -------

    list of Bunch
    """"""
    info = []
    for bids_event_file in bids_event_files:
        with open(bids_event_file) as f:
            f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t')
            events = [{k: v for k, v in row.items()} for row in f_events]
            conditions = list(set([i[condition_column] for i in events]))
            runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
            for condition in conditions:
                selected_events = [i for i in events if i[condition_column]==condition]
                onsets = [float(i['onset']) for i in selected_events]
                durations = [float(i['duration']) for i in selected_events]
                if time_repetition:
                    decimals = math.ceil(-math.log10(time_repetition))
                    onsets = [round(i,decimals) for i in onsets]
                    durations = [round(i,decimals) for i in durations]
                if condition:
                    runinfo.conditions.append(condition)
                else:
                    runinfo.conditions.append('e0')
                runinfo.onsets.append(onsets)
                runinfo.durations.append(durations)
                try:
                    amplitudes = [float(i[amplitude_column]) for i in selected_events]
                    runinfo.amplitudes.append(amplitudes)
                except KeyError:
                    runinfo.amplitudes.append([1]*len(onsets))
            info.append(runinfo)
    return info

"
12949,"def get_token_from_request(request):
    auth = request.META.get(JWT_AUTH_HEADER, """").split()
    prefix = JWT_AUTH_HEADER_PREFIX

    if len(auth) != 2 or auth[0].lower() != prefix.lower():
        return None
    return auth[1]

","def get_token_from_request(request):
    auth = request.META.get(JWT_AUTH_HEADER, """").split(maxsplit=1)
    prefix = JWT_AUTH_HEADER_PREFIX

    if len(auth) != 2 or auth[0].lower() != prefix.lower():
        return None
    return auth[1]

"
477,"def parse_toggle(entry):
    """"""
    Split a toggle entry into the namespace and the item.
    :return: tuple(namespace, item)
    """"""
    from corehq.toggles import NAMESPACE_DOMAIN, NAMESPACE_EMAIL_DOMAIN
    namespace = None
    if entry.startswith(NAMESPACE_DOMAIN) or entry.startswith(NAMESPACE_EMAIL_DOMAIN):
        namespace, entry = entry.split("":"")
    return namespace, entry

","def parse_toggle(entry):
    """"""
    Split a toggle entry into the namespace and the item.
    :return: tuple(namespace, item)
    """"""
    from corehq.toggles import NAMESPACE_DOMAIN, NAMESPACE_EMAIL_DOMAIN
    namespace = None
    if entry.startswith((NAMESPACE_DOMAIN + "":"", NAMESPACE_EMAIL_DOMAIN + "":"")):
        namespace, entry = entry.split("":"")
    return namespace, entry

"
22487,"def assert_has_archive_member(output_bytes, path, verify_assertions_function, children, all=""false"", n: str = None, delta: str = 0, min: str = None, max: str = None, negate: bool = False):
    """""" Recursively checks the specified children assertions against the text of
    the first element matching the specified path found within the archive.
    Currently supported formats: .zip, .tar, .tar.gz.""""""
    all = asbool(all)
    extract_foo = None
    # from python 3.9 is_tarfile supports file like objects then we do not need
    # the tempfile detour but can use io.BytesIO(output_bytes)
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(output_bytes)
        tmp.flush()
        if zipfile.is_zipfile(tmp.name):
            extract_foo = _extract_from_zip
            list_foo = _list_from_zip
        elif tarfile.is_tarfile(tmp.name):
            extract_foo = _extract_from_tar
            list_foo = _list_from_tar
    assert extract_foo is not None, f""Expected path '{path}' to be an archive""

    # get list of matching file names in archive and check against n, delta,
    # min, max (slightly abusing the output and text as well as the function
    # parameters)
    fns = list_foo(output_bytes, path)
    _assert_presence_number(None, path, n, delta, min, max, negate,
        lambda o, t: len(fns) > 0,
        lambda o, t: len(fns),
        ""{expected} path '{text}' in archive"",
        ""{expected} {n}+-{delta} matches for path '{text}' in archive"",
        ""{expected} that the number of matches for path '{text}' in archive is in [{min}:{max}]"")

    # check sub-assertions on members matching path
    for fn in fns:
        contents = extract_foo(output_bytes, fn)
        try:
            verify_assertions_function(contents, children)
        except AssertionError as e:
            raise AssertionError(f""Archive member '{path}': {str(e)}"")
        if not all:
            break
","def assert_has_archive_member(output_bytes, path, verify_assertions_function, children, all=""false"", n: Optional[Union[str, int]] = None, delta: Union[str, int] = 0, min: Optional[Union[str, int]] = None, max: Optional[Union[str, int]] = None, negate: bool = False):
    """""" Recursively checks the specified children assertions against the text of
    the first element matching the specified path found within the archive.
    Currently supported formats: .zip, .tar, .tar.gz.""""""
    all = asbool(all)
    extract_foo = None
    # from python 3.9 is_tarfile supports file like objects then we do not need
    # the tempfile detour but can use io.BytesIO(output_bytes)
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(output_bytes)
        tmp.flush()
        if zipfile.is_zipfile(tmp.name):
            extract_foo = _extract_from_zip
            list_foo = _list_from_zip
        elif tarfile.is_tarfile(tmp.name):
            extract_foo = _extract_from_tar
            list_foo = _list_from_tar
    assert extract_foo is not None, f""Expected path '{path}' to be an archive""

    # get list of matching file names in archive and check against n, delta,
    # min, max (slightly abusing the output and text as well as the function
    # parameters)
    fns = list_foo(output_bytes, path)
    _assert_presence_number(None, path, n, delta, min, max, negate,
        lambda o, t: len(fns) > 0,
        lambda o, t: len(fns),
        ""{expected} path '{text}' in archive"",
        ""{expected} {n}+-{delta} matches for path '{text}' in archive"",
        ""{expected} that the number of matches for path '{text}' in archive is in [{min}:{max}]"")

    # check sub-assertions on members matching path
    for fn in fns:
        contents = extract_foo(output_bytes, fn)
        try:
            verify_assertions_function(contents, children)
        except AssertionError as e:
            raise AssertionError(f""Archive member '{path}': {str(e)}"")
        if not all:
            break
"
30243,"def format_http_transaction_list(uuid):
    # Scan Lists sometimes returns empty
    scan_lists = None
    while scan_lists is None:
        try:
            response = urlscan_submit_request(uuid)
            scan_lists = response['lists']
        except Exception:
            pass

    url = demisto.args().get('url')
    limit = int(demisto.args().get('limit'))
    metadata = None
    if limit > 100:
        limit = 100
        metadata = ""Limited the data to the first 100 http transactions""

    url_list = scan_lists.get('urls', [])[:limit]

    context = {
        'URL': url,
        'httpTransaction': url_list
    }

    ec = {
        'URLScan(val.URL && val.URL == obj.URL)': context,
        'URL': url
    }

    human_readable = tableToMarkdown('{} - http transaction list'.format(url), url_list, ['URLs'], metadata=metadata)
    return_outputs(human_readable, ec, response)

","def format_http_transaction_list(uuid):
    # Scan Lists sometimes returns empty
    scan_lists = None
    while scan_lists is None:
        try:
            response = urlscan_submit_request(uuid)
            scan_lists = response['lists']
        except Exception:
            pass

    url = demisto.args().get('url')
    limit = int(demisto.args().get('limit'))
    metadata = None
    if limit > 100:
        limit = 100
        metadata = ""Limited the data to the first 100 http transactions""

    url_list = scan_lists.get('urls', [])[:limit]

    context = {
        'URL': url,
        'httpTransaction': url_list
    }

    ec = {
        'URLScan(val.URL && val.URL == obj.URL)': context,
        'URL.Data': url
    }

    human_readable = tableToMarkdown('{} - http transaction list'.format(url), url_list, ['URLs'], metadata=metadata)
    return_outputs(human_readable, ec, response)

"
2665,"def test_mnb_prior_unobserved_targets():
    # test smoothing of prior for yet unobserved targets

    # Create toy training data
    X = np.array([[0, 1], [1, 0]])
    y = np.array([0, 1])

    clf = MultinomialNB()

    with warnings.catch_warnings():
        warnings.simplefilter(""error"", UserWarning)

        clf.partial_fit(X, y, classes=[0, 1, 2])

    assert clf.predict([[0, 1]]) == 0
    assert clf.predict([[1, 0]]) == 1
    assert clf.predict([[1, 1]]) == 0

    # add a training example with previously unobserved class
    with warnings.catch_warnings():
        warnings.simplefilter(""error"", UserWarning)

        clf.partial_fit([[1, 1]], [2])

    assert clf.predict([[0, 1]]) == 0
    assert clf.predict([[1, 0]]) == 1
    assert clf.predict([[1, 1]]) == 2

","def test_mnb_prior_unobserved_targets():
    # test smoothing of prior for yet unobserved targets

    # Create toy training data
    X = np.array([[0, 1], [1, 0]])
    y = np.array([0, 1])

    clf = MultinomialNB()

    with warnings.catch_warnings():
        warnings.simplefilter(""error"", UserWarning)

        clf.partial_fit(X, y, classes=[0, 1, 2])

    assert clf.predict([[0, 1]]) == 0
    assert clf.predict([[1, 0]]) == 1
    assert clf.predict([[1, 1]]) == 0

    # add a training example with previously unobserved class
    with warnings.catch_warnings():
        warnings.simplefilter(""error"", RuntimeWarning)

        clf.partial_fit([[1, 1]], [2])

    assert clf.predict([[0, 1]]) == 0
    assert clf.predict([[1, 0]]) == 1
    assert clf.predict([[1, 1]]) == 2

"
45709,"def forecast(
    R,
    V,
    timesteps,
    n_cascade_levels=6,
    R_thr=None,
    extrap_method=""semilagrangian"",
    decomp_method=""fft"",
    bandpass_filter_method=""gaussian"",
    ar_order=2,
    conditional=False,
    probmatching_method=""cdf"",
    num_workers=1,
    fft_method=""numpy"",
    domain=""spatial"",
    extrap_kwargs=None,
    filter_kwargs=None,
    measure_time=False,
):
    """"""Generate a nowcast by using the Spectral Prognosis (S-PROG) method.

    Parameters
    ----------
    R: array-like
      Array of shape (ar_order+1,m,n) containing the input precipitation fields
      ordered by timestamp from oldest to newest. The time steps between
      the inputs are assumed to be regular.
    V: array-like
      Array of shape (2,m,n) containing the x- and y-components of the
      advection field.
      The velocities are assumed to represent one time step between the
      inputs. All values are required to be finite.
    timesteps: int or list
      Number of time steps to forecast or a list of time steps for which the
      forecasts are computed (relative to the input time step). The elements of
      the list are required to be in ascending order.
    n_cascade_levels: int, optional
      The number of cascade levels to use.
    R_thr: float
      The threshold value for minimum observable precipitation intensity.
    extrap_method: str, optional
      Name of the extrapolation method to use. See the documentation of
      pysteps.extrapolation.interface.
    decomp_method: {'fft'}, optional
      Name of the cascade decomposition method to use. See the documentation
      of pysteps.cascade.interface.
    bandpass_filter_method: {'gaussian', 'uniform'}, optional
      Name of the bandpass filter method to use with the cascade decomposition.
      See the documentation of pysteps.cascade.interface.
    ar_order: int, optional
      The order of the autoregressive model to use. Must be >= 1.
    conditional: bool, optional
      If set to True, compute the statistics of the precipitation field
      conditionally by excluding pixels where the values are
      below the threshold R_thr.
    probmatching_method: {'cdf','mean',None}, optional
      Method for matching the conditional statistics of the forecast field
      (areas with precipitation intensity above the threshold R_thr) with those
      of the most recently observed one. 'cdf'=map the forecast CDF to the
      observed one, 'mean'=adjust only the mean value,
      None=no matching applied.
    num_workers: int, optional
      The number of workers to use for parallel computation. Applicable if dask
      is enabled or pyFFTW is used for computing the FFT.
      When num_workers>1, it is advisable to disable OpenMP by setting
      the environment variable OMP_NUM_THREADS to 1.
      This avoids slowdown caused by too many simultaneous threads.
    fft_method: str, optional
      A string defining the FFT method to use (see utils.fft.get_method).
      Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
      the recommended method is 'pyfftw'.
    domain: {""spatial"", ""spectral""}
      If ""spatial"", all computations are done in the spatial domain (the
      classical S-PROG model). If ""spectral"", the AR(2) models are applied
      directly in the spectral domain to reduce memory footprint and improve
      performance :cite:`PCH2019a`.
    extrap_kwargs: dict, optional
      Optional dictionary containing keyword arguments for the extrapolation
      method. See the documentation of pysteps.extrapolation.
    filter_kwargs: dict, optional
      Optional dictionary containing keyword arguments for the filter method.
      See the documentation of pysteps.cascade.bandpass_filters.py.
    measure_time: bool
      If set to True, measure, print and return the computation time.

    Returns
    -------
    out: ndarray
      A three-dimensional array of shape (num_timesteps,m,n) containing a time
      series of forecast precipitation fields. The time series starts from
      t0+timestep, where timestep is taken from the input precipitation fields
      R. If measure_time is True, the return value is a three-element tuple
      containing the nowcast array, the initialization time of the nowcast
      generator and the time used in the main loop (seconds).

    See also
    --------
    pysteps.extrapolation.interface, pysteps.cascade.interface

    References
    ----------
    :cite:`Seed2003`, :cite:`PCH2019a`

    """"""
    _check_inputs(R, V, timesteps, ar_order)

    if extrap_kwargs is None:
        extrap_kwargs = dict()

    if filter_kwargs is None:
        filter_kwargs = dict()

    if np.any(~np.isfinite(V)):
        raise ValueError(""V contains non-finite values"")

    print(""Computing S-PROG nowcast:"")
    print(""-------------------------"")
    print("""")

    print(""Inputs:"")
    print(""-------"")
    print(""input dimensions: %dx%d"" % (R.shape[1], R.shape[2]))
    print("""")

    print(""Methods:"")
    print(""--------"")
    print(""extrapolation:          %s"" % extrap_method)
    print(""bandpass filter:        %s"" % bandpass_filter_method)
    print(""decomposition:          %s"" % decomp_method)
    print(""conditional statistics: %s"" % (""yes"" if conditional else ""no""))
    print(""probability matching:   %s"" % probmatching_method)
    print(""FFT method:             %s"" % fft_method)
    print(""domain:                 %s"" % domain)
    print("""")

    print(""Parameters:"")
    print(""-----------"")
    if isinstance(timesteps, int):
        print(""number of time steps:     %d"" % timesteps)
    else:
        print(""time steps:               %s"" % timesteps)
    print(""parallel threads:         %d"" % num_workers)
    print(""number of cascade levels: %d"" % n_cascade_levels)
    print(""order of the AR(p) model: %d"" % ar_order)
    print(""precip. intensity threshold: %g"" % R_thr)

    if measure_time:
        starttime_init = time.time()

    fft = utils.get_method(fft_method, shape=R.shape[1:], n_threads=num_workers)

    M, N = R.shape[1:]

    # initialize the band-pass filter
    filter_method = cascade.get_method(bandpass_filter_method)
    filter = filter_method((M, N), n_cascade_levels, **filter_kwargs)

    decomp_method, recomp_method = cascade.get_method(decomp_method)

    extrapolator_method = extrapolation.get_method(extrap_method)

    R = R[-(ar_order + 1) :, :, :].copy()
    R_min = np.nanmin(R)

    # determine the domain mask from non-finite values
    domain_mask = np.logical_or.reduce(
        [~np.isfinite(R[i, :]) for i in range(R.shape[0])]
    )

    # determine the precipitation threshold mask
    if conditional:
        MASK_thr = np.logical_and.reduce(
            [R[i, :, :] >= R_thr for i in range(R.shape[0])]
        )
    else:
        MASK_thr = None

    # initialize the extrapolator
    x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1]))

    xy_coords = np.stack([x_values, y_values])

    extrap_kwargs = extrap_kwargs.copy()
    extrap_kwargs[""xy_coords""] = xy_coords
    extrap_kwargs[""allow_nonfinite_values""] = True

    # advect the previous precipitation fields to the same position as the
    # most recent one (i.e. transform them into Lagrangian coordinates)
    res = list()

    def f(R, i):
        return extrapolator_method(R[i, :], V, ar_order - i, ""min"", **extrap_kwargs)[-1]

    for i in range(ar_order):
        if not DASK_IMPORTED:
            R[i, :, :] = f(R, i)
        else:
            res.append(dask.delayed(f)(R, i))

    if DASK_IMPORTED:
        num_workers_ = len(res) if num_workers > len(res) else num_workers
        R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])

    # replace non-finite values with the minimum value
    R = R.copy()
    for i in range(R.shape[0]):
        R[i, ~np.isfinite(R[i, :])] = np.nanmin(R[i, :])

    # compute the cascade decompositions of the input precipitation fields
    R_d = []
    for i in range(ar_order + 1):
        R_ = decomp_method(
            R[i, :, :],
            filter,
            mask=MASK_thr,
            fft_method=fft,
            output_domain=domain,
            normalize=True,
            compute_stats=True,
            compact_output=True,
        )
        R_d.append(R_)

    # rearrange the cascade levels into a four-dimensional array of shape
    # (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
    R_c = nowcast_utils.stack_cascades(
        R_d, n_cascade_levels, convert_to_full_arrays=True
    )

    # compute lag-l temporal autocorrelation coefficients for each cascade level
    GAMMA = np.empty((n_cascade_levels, ar_order))
    for i in range(n_cascade_levels):
        if domain == ""spatial"":
            GAMMA[i, :] = correlation.temporal_autocorrelation(R_c[i], mask=MASK_thr)
        else:
            GAMMA[i, :] = correlation.temporal_autocorrelation(
                R_c[i], domain=""spectral"", x_shape=R.shape[1:]
            )

    R_c = nowcast_utils.stack_cascades(
        R_d, n_cascade_levels, convert_to_full_arrays=False
    )

    R_d = R_d[-1]

    nowcast_utils.print_corrcoefs(GAMMA)

    if ar_order == 2:
        # adjust the lag-2 correlation coefficient to ensure that the AR(p)
        # process is stationary
        for i in range(n_cascade_levels):
            GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(GAMMA[i, 0], GAMMA[i, 1])

    # estimate the parameters of the AR(p) model from the autocorrelation
    # coefficients
    PHI = np.empty((n_cascade_levels, ar_order + 1))
    for i in range(n_cascade_levels):
        PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])

    nowcast_utils.print_ar_params(PHI)

    # discard all except the last p cascades because the older ones are not
    # needed for the AR(p) model
    R_c = [R_c[i][-ar_order:] for i in range(n_cascade_levels)]

    if probmatching_method == ""mean"":
        mu_0 = np.mean(R[-1, :, :][R[-1, :, :] >= R_thr])

    # compute precipitation mask and wet area ratio
    MASK_p = R[-1, :, :] >= R_thr
    war = 1.0 * np.sum(MASK_p) / (R.shape[1] * R.shape[2])

    if measure_time:
        init_time = time.time() - starttime_init

    R = R[-1, :, :]

    print(""Starting nowcast computation."")

    if measure_time:
        starttime_mainloop = time.time()

    R_f = []

    if isinstance(timesteps, int):
        timesteps = range(timesteps + 1)
        timestep_type = ""int""
    else:
        original_timesteps = [0] + list(timesteps)
        timesteps = nowcast_utils.binned_timesteps(original_timesteps)
        timestep_type = ""list""

    R_f_prev = R
    extrap_kwargs[""return_displacement""] = True

    D = None
    t_nowcast = 0
    t_prev = 0.0

    # iterate each time step
    for t in range(len(timesteps)):
        if timestep_type == ""list"":
            subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]]
        else:
            subtimesteps = [t]

        if len(subtimesteps) > 1 or t > 0:
            nowcast_time_step = True
        else:
            nowcast_time_step = False

        if nowcast_time_step:
            print(
                ""Computing nowcast for time step %d... "" % (t_nowcast + 1),
                end="""",
                flush=True,
            )
            t_nowcast += 1

        if measure_time:
            starttime = time.time()

        for i in range(n_cascade_levels):
            R_c[i] = autoregression.iterate_ar_model(R_c[i], PHI[i, :])

        R_d[""cascade_levels""] = [R_c[i][-1, :] for i in range(n_cascade_levels)]
        if domain == ""spatial"":
            R_d[""cascade_levels""] = np.stack(R_d[""cascade_levels""])

        R_f_new = recomp_method(R_d)

        if domain == ""spectral"":
            R_f_new = fft.irfft2(R_f_new)

        MASK = _compute_sprog_mask(R_f_new, war)
        R_f_new[~MASK] = R_min

        if probmatching_method == ""cdf"":
            # adjust the CDF of the forecast to match the most recently
            # observed precipitation field
            R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R)
        elif probmatching_method == ""mean"":
            mu_fct = np.mean(R_f_new[MASK])
            R_f_new[MASK] = R_f_new[MASK] - mu_fct + mu_0

        R_f_new[domain_mask] = np.nan

        # advect the recomposed precipitation field to obtain the forecast for
        # the current time step (or subtimesteps if non-integer time steps are
        # given)
        for t_sub in subtimesteps:
            if t_sub > 0:
                t_diff_prev_int = t_sub - int(t_sub)
                if t_diff_prev_int > 0.0:
                    R_f_ip = (
                        1.0 - t_diff_prev_int
                    ) * R_f_prev + t_diff_prev_int * R_f_new
                else:
                    R_f_ip = R_f_prev

                t_diff_prev = t_sub - t_prev
                extrap_kwargs[""displacement_prev""] = D
                R_f_ep, D = extrapolator_method(
                    R_f_ip,
                    V,
                    [t_diff_prev],
                    **extrap_kwargs,
                )
                R_f.append(R_f_ep[0])
                t_prev = t_sub

        # advect the forecast field by one time step if no subtimesteps in the
        # current interval were found
        if len(subtimesteps) == 0:
            t_diff_prev = t + 1 - t_prev
            extrap_kwargs[""displacement_prev""] = D
            _, D = extrapolator_method(
                None,
                V,
                [t_diff_prev],
                **extrap_kwargs,
            )
            t_prev = t + 1

        R_f_prev = R_f_new

        if nowcast_time_step:
            if measure_time:
                print(""%.2f seconds."" % (time.time() - starttime))
            else:
                print(""done."")

    if measure_time:
        mainloop_time = time.time() - starttime_mainloop

    R_f = np.stack(R_f)

    if measure_time:
        return R_f, init_time, mainloop_time
    else:
        return R_f

","def forecast(
    R,
    V,
    timesteps,
    n_cascade_levels=6,
    R_thr=None,
    extrap_method=""semilagrangian"",
    decomp_method=""fft"",
    bandpass_filter_method=""gaussian"",
    ar_order=2,
    conditional=False,
    probmatching_method=""cdf"",
    num_workers=1,
    fft_method=""numpy"",
    domain=""spatial"",
    extrap_kwargs=None,
    filter_kwargs=None,
    measure_time=False,
):
    """"""Generate a nowcast by using the Spectral Prognosis (S-PROG) method.

    Parameters
    ----------
    R: array-like
      Array of shape (ar_order+1,m,n) containing the input precipitation fields
      ordered by timestamp from oldest to newest. The time steps between
      the inputs are assumed to be regular.
    V: array-like
      Array of shape (2,m,n) containing the x- and y-components of the
      advection field.
      The velocities are assumed to represent one time step between the
      inputs. All values are required to be finite.
    timesteps: int or list
      Number of time steps to forecast or a list of time steps for which the
      forecasts are computed (relative to the input time step). The elements of
      the list are required to be in ascending order.
    n_cascade_levels: int, optional
      The number of cascade levels to use.
    R_thr: float
      The threshold value for minimum observable precipitation intensity.
    extrap_method: str, optional
      Name of the extrapolation method to use. See the documentation of
      pysteps.extrapolation.interface.
    decomp_method: {'fft'}, optional
      Name of the cascade decomposition method to use. See the documentation
      of pysteps.cascade.interface.
    bandpass_filter_method: {'gaussian', 'uniform'}, optional
      Name of the bandpass filter method to use with the cascade decomposition.
      See the documentation of pysteps.cascade.interface.
    ar_order: int, optional
      The order of the autoregressive model to use. Must be >= 1.
    conditional: bool, optional
      If set to True, compute the statistics of the precipitation field
      conditionally by excluding pixels where the values are
      below the threshold R_thr.
    probmatching_method: {'cdf','mean',None}, optional
      Method for matching the conditional statistics of the forecast field
      (areas with precipitation intensity above the threshold R_thr) with those
      of the most recently observed one. 'cdf'=map the forecast CDF to the
      observed one, 'mean'=adjust only the mean value,
      None=no matching applied.
    num_workers: int, optional
      The number of workers to use for parallel computation. Applicable if dask
      is enabled or pyFFTW is used for computing the FFT.
      When num_workers>1, it is advisable to disable OpenMP by setting
      the environment variable OMP_NUM_THREADS to 1.
      This avoids slowdown caused by too many simultaneous threads.
    fft_method: str, optional
      A string defining the FFT method to use (see utils.fft.get_method).
      Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
      the recommended method is 'pyfftw'.
    domain: {""spatial"", ""spectral""}
      If ""spatial"", all computations are done in the spatial domain (the
      classical S-PROG model). If ""spectral"", the AR(2) models are applied
      directly in the spectral domain to reduce memory footprint and improve
      performance :cite:`PCH2019a`.
    extrap_kwargs: dict, optional
      Optional dictionary containing keyword arguments for the extrapolation
      method. See the documentation of pysteps.extrapolation.
    filter_kwargs: dict, optional
      Optional dictionary containing keyword arguments for the filter method.
      See the documentation of pysteps.cascade.bandpass_filters.py.
    measure_time: bool
      If set to True, measure, print and return the computation time.

    Returns
    -------
    out: ndarray
      A three-dimensional array of shape (num_timesteps,m,n) containing a time
      series of forecast precipitation fields. The time series starts from
      t0+timestep, where timestep is taken from the input precipitation fields
      R. If measure_time is True, the return value is a three-element tuple
      containing the nowcast array, the initialization time of the nowcast
      generator and the time used in the main loop (seconds).

    See also
    --------
    pysteps.extrapolation.interface, pysteps.cascade.interface

    References
    ----------
    :cite:`Seed2003`, :cite:`PCH2019a`

    """"""
    _check_inputs(R, V, timesteps, ar_order)

    if extrap_kwargs is None:
        extrap_kwargs = dict()

    if filter_kwargs is None:
        filter_kwargs = dict()

    if np.any(~np.isfinite(V)):
        raise ValueError(""V contains non-finite values"")

    print(""Computing S-PROG nowcast:"")
    print(""-------------------------"")
    print("""")

    print(""Inputs:"")
    print(""-------"")
    print(""input dimensions: %dx%d"" % (R.shape[1], R.shape[2]))
    print("""")

    print(""Methods:"")
    print(""--------"")
    print(""extrapolation:          %s"" % extrap_method)
    print(""bandpass filter:        %s"" % bandpass_filter_method)
    print(""decomposition:          %s"" % decomp_method)
    print(""conditional statistics: %s"" % (""yes"" if conditional else ""no""))
    print(""probability matching:   %s"" % probmatching_method)
    print(""FFT method:             %s"" % fft_method)
    print(""domain:                 %s"" % domain)
    print("""")

    print(""Parameters:"")
    print(""-----------"")
    if isinstance(timesteps, int):
        print(""number of time steps:     %d"" % timesteps)
    else:
        print(""time steps:               %s"" % timesteps)
    print(""parallel threads:         %d"" % num_workers)
    print(""number of cascade levels: %d"" % n_cascade_levels)
    print(""order of the AR(p) model: %d"" % ar_order)
    print(""precip. intensity threshold: %g"" % R_thr)

    if measure_time:
        starttime_init = time.time()

    fft = utils.get_method(fft_method, shape=R.shape[1:], n_threads=num_workers)

    M, N = R.shape[1:]

    # initialize the band-pass filter
    filter_method = cascade.get_method(bandpass_filter_method)
    filter = filter_method((M, N), n_cascade_levels, **filter_kwargs)

    decomp_method, recomp_method = cascade.get_method(decomp_method)

    extrapolator_method = extrapolation.get_method(extrap_method)

    R = R[-(ar_order + 1) :, :, :].copy()
    R_min = np.nanmin(R)

    # determine the domain mask from non-finite values
    domain_mask = np.logical_or.reduce(
        [~np.isfinite(R[i, :]) for i in range(R.shape[0])]
    )

    # determine the precipitation threshold mask
    if conditional:
        MASK_thr = np.logical_and.reduce(
            [R[i, :, :] >= R_thr for i in range(R.shape[0])]
        )
    else:
        MASK_thr = None

    # initialize the extrapolator
    x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1]))

    xy_coords = np.stack([x_values, y_values])

    extrap_kwargs = extrap_kwargs.copy()
    extrap_kwargs[""xy_coords""] = xy_coords
    extrap_kwargs[""allow_nonfinite_values""] = True

    # advect the previous precipitation fields to the same position as the
    # most recent one (i.e. transform them into Lagrangian coordinates)
    res = list()

    def f(R, i):
        return extrapolator_method(R[i, :], V, ar_order - i, ""min"", **extrap_kwargs)[-1]

    for i in range(ar_order):
        if not DASK_IMPORTED:
            R[i, :, :] = f(R, i)
        else:
            res.append(dask.delayed(f)(R, i))

    if DASK_IMPORTED:
        num_workers_ = len(res) if num_workers > len(res) else num_workers
        R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])

    # replace non-finite values with the minimum value
    R = R.copy()
    for i in range(R.shape[0]):
        R[i, ~np.isfinite(R[i, :])] = np.nanmin(R[i, :])

    # compute the cascade decompositions of the input precipitation fields
    R_d = []
    for i in range(ar_order + 1):
        R_ = decomp_method(
            R[i, :, :],
            filter,
            mask=MASK_thr,
            fft_method=fft,
            output_domain=domain,
            normalize=True,
            compute_stats=True,
            compact_output=True,
        )
        R_d.append(R_)

    # rearrange the cascade levels into a four-dimensional array of shape
    # (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
    R_c = nowcast_utils.stack_cascades(
        R_d, n_cascade_levels, convert_to_full_arrays=True
    )

    # compute lag-l temporal autocorrelation coefficients for each cascade level
    GAMMA = np.empty((n_cascade_levels, ar_order))
    for i in range(n_cascade_levels):
        if domain == ""spatial"":
            GAMMA[i, :] = correlation.temporal_autocorrelation(R_c[i], mask=MASK_thr)
        else:
            GAMMA[i, :] = correlation.temporal_autocorrelation(
                R_c[i], domain=""spectral"", x_shape=R.shape[1:]
            )

    R_c = nowcast_utils.stack_cascades(
        R_d, n_cascade_levels, convert_to_full_arrays=False
    )

    R_d = R_d[-1]

    nowcast_utils.print_corrcoefs(GAMMA)

    if ar_order == 2:
        # adjust the lag-2 correlation coefficient to ensure that the AR(p)
        # process is stationary
        for i in range(n_cascade_levels):
            GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(GAMMA[i, 0], GAMMA[i, 1])

    # estimate the parameters of the AR(p) model from the autocorrelation
    # coefficients
    PHI = np.empty((n_cascade_levels, ar_order + 1))
    for i in range(n_cascade_levels):
        PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])

    nowcast_utils.print_ar_params(PHI)

    # discard all except the last p cascades because the older ones are not
    # needed for the AR(p) model
    R_c = [R_c[i][-ar_order:] for i in range(n_cascade_levels)]

    if probmatching_method == ""mean"":
        mu_0 = np.mean(R[-1, :, :][R[-1, :, :] >= R_thr])

    # compute precipitation mask and wet area ratio
    MASK_p = R[-1, :, :] >= R_thr
    war = 1.0 * np.sum(MASK_p) / (R.shape[1] * R.shape[2])

    if measure_time:
        init_time = time.time() - starttime_init

    R = R[-1, :, :]

    print(""Starting nowcast computation."")

    if measure_time:
        starttime_mainloop = time.time()

    R_f = []

    if isinstance(timesteps, int):
        timesteps = range(timesteps + 1)
        timestep_type = ""int""
    else:
        original_timesteps = [0] + list(timesteps)
        timesteps = nowcast_utils.binned_timesteps(original_timesteps)
        timestep_type = ""list""

    R_f_prev = R
    extrap_kwargs[""return_displacement""] = True

    D = None
    t_nowcast = 0
    t_prev = 0.0

    # iterate each time step
    for t, timestep in enumerate(timesteps):
        if timestep_type == ""list"":
            subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]]
        else:
            subtimesteps = [t]

        if len(subtimesteps) > 1 or t > 0:
            nowcast_time_step = True
        else:
            nowcast_time_step = False

        if nowcast_time_step:
            print(
                ""Computing nowcast for time step %d... "" % (t_nowcast + 1),
                end="""",
                flush=True,
            )
            t_nowcast += 1

        if measure_time:
            starttime = time.time()

        for i in range(n_cascade_levels):
            R_c[i] = autoregression.iterate_ar_model(R_c[i], PHI[i, :])

        R_d[""cascade_levels""] = [R_c[i][-1, :] for i in range(n_cascade_levels)]
        if domain == ""spatial"":
            R_d[""cascade_levels""] = np.stack(R_d[""cascade_levels""])

        R_f_new = recomp_method(R_d)

        if domain == ""spectral"":
            R_f_new = fft.irfft2(R_f_new)

        MASK = _compute_sprog_mask(R_f_new, war)
        R_f_new[~MASK] = R_min

        if probmatching_method == ""cdf"":
            # adjust the CDF of the forecast to match the most recently
            # observed precipitation field
            R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R)
        elif probmatching_method == ""mean"":
            mu_fct = np.mean(R_f_new[MASK])
            R_f_new[MASK] = R_f_new[MASK] - mu_fct + mu_0

        R_f_new[domain_mask] = np.nan

        # advect the recomposed precipitation field to obtain the forecast for
        # the current time step (or subtimesteps if non-integer time steps are
        # given)
        for t_sub in subtimesteps:
            if t_sub > 0:
                t_diff_prev_int = t_sub - int(t_sub)
                if t_diff_prev_int > 0.0:
                    R_f_ip = (
                        1.0 - t_diff_prev_int
                    ) * R_f_prev + t_diff_prev_int * R_f_new
                else:
                    R_f_ip = R_f_prev

                t_diff_prev = t_sub - t_prev
                extrap_kwargs[""displacement_prev""] = D
                R_f_ep, D = extrapolator_method(
                    R_f_ip,
                    V,
                    [t_diff_prev],
                    **extrap_kwargs,
                )
                R_f.append(R_f_ep[0])
                t_prev = t_sub

        # advect the forecast field by one time step if no subtimesteps in the
        # current interval were found
        if len(subtimesteps) == 0:
            t_diff_prev = t + 1 - t_prev
            extrap_kwargs[""displacement_prev""] = D
            _, D = extrapolator_method(
                None,
                V,
                [t_diff_prev],
                **extrap_kwargs,
            )
            t_prev = t + 1

        R_f_prev = R_f_new

        if nowcast_time_step:
            if measure_time:
                print(""%.2f seconds."" % (time.time() - starttime))
            else:
                print(""done."")

    if measure_time:
        mainloop_time = time.time() - starttime_mainloop

    R_f = np.stack(R_f)

    if measure_time:
        return R_f, init_time, mainloop_time
    else:
        return R_f

"
52494,"def get_docket_ids_missing_info(num_to_get):
    docket_ids = set()
    docket_ids.update(
        Docket.objects.filter(
            date_filed__isnull=True, source__in=Docket.RECAP_SOURCES
        )
        .order_by(""-view_count"")[:num_to_get]
        .values_list(""pk"", flat=True)
    )
    return docket_ids

","def get_docket_ids_missing_info(num_to_get):
    return set(
        Docket.objects.filter(
            date_filed__isnull=True, source__in=Docket.RECAP_SOURCES
        )
        .order_by(""-view_count"")[:num_to_get]
        .values_list(""pk"", flat=True)
    )
"
55777,"def _register_action_obj(action: Action) -> DisposeCallable:
    """"""Register an Action object. Return a function that unregisters the action.

    Helper for `register_action()`.
    """"""
    # command
    disposers = [
        CommandsRegistry.instance().register_command(
            action.id, action.run, action.title
        )
    ]

    # menu

    items = []
    for rule in action.menus or ():
        menu_item = MenuItem(
            command=action, when=rule.when, group=rule.group, order=rule.order
        )
        items.append((rule.id, menu_item))

    disposers.append(MenuRegistry.instance().append_menu_items(items))
    if action.add_to_command_palette:
        # TODO: dispose
        MenuRegistry.instance().add_commands(action)

    # keybinding
    for keyb in action.keybindings or ():
        if _d := KeybindingsRegistry.instance().register_keybinding_rule(
            action.id, keyb
        ):
            disposers.append(_d)

    def _dispose():
        for d in disposers:
            d()

    return _dispose
","def _register_action_obj(action: Action) -> DisposeCallable:
    """"""Register an Action object. Return a function that unregisters the action.

    Helper for `register_action()`.
    """"""
    # command
    disposers = [
        CommandsRegistry.instance().register_command(
            action.id, action.run, action.title
        )
    ]

    # menu

    items = []
    for rule in action.menus or ():
        menu_item = MenuItem(
            command=action, when=rule.when, group=rule.group, order=rule.order
        )
        items.append((rule.id, menu_item))

    disposers.append(MenuRegistry.instance().append_menu_items(items))
    if action.add_to_command_palette:
        # TODO: dispose
        MenuRegistry.instance().add_commands(action)

    # keybinding
    reg = KeybindingsRegistry.instance()
    for keyb in action.keybindings or ():
        if _d := reg.register_keybinding_rule(action.id, keyb):
            disposers.append(_d)

    def _dispose():
        for d in disposers:
            d()

    return _dispose
"
38964,"def test_model_export_nested_list():
    class Foo(BaseModel):
        a: int = 1
        b: int = 2

    class Bar(BaseModel):
        c: int
        foos: List[Foo]

    m = Bar(c=3, foos=[Foo(), Foo()])
    exclusion = {idx: {'a'} for idx in range(len(m.foos))}
    assert m.dict(exclude={'foos': exclusion}) == {'c': 3, 'foos': [{'b': 2}, {'b': 2}]}

    with pytest.raises(TypeError) as e:
        m.dict(exclude={'foos': {'a'}})
    assert 'expected integer keys' in str(e.value)

","def test_model_export_nested_list():
    class Foo(BaseModel):
        a: int = 1
        b: int = 2

    class Bar(BaseModel):
        c: int
        foos: List[Foo]

    m = Bar(c=3, foos=[Foo(), Foo()])
    exclusion = {0: {'a'}, 1: {'a'}}
    assert m.dict(exclude={'foos': exclusion}) == {'c': 3, 'foos': [{'b': 2}, {'b': 2}]}

    with pytest.raises(TypeError) as e:
        m.dict(exclude={'foos': {'a'}})
    assert 'expected integer keys' in str(e.value)

"
38961,"def Field(
    default: Any = Undefined,
    *,
    default_factory: Any = None,
    alias: str = None,
    title: str = None,
    description: str = None,
    const: bool = None,
    gt: float = None,
    ge: float = None,
    lt: float = None,
    le: float = None,
    multiple_of: float = None,
    min_items: int = None,
    max_items: int = None,
    min_length: int = None,
    max_length: int = None,
    regex: str = None,
    **extra: Any,
) -> Any:
    """"""
    Used to provide extra information about a field, either for the model schema or complex validation. Some arguments
    apply only to number fields (``int``, ``float``, ``Decimal``) and some apply only to ``str``.

    :param default: since this is replacing the field’s default, its first argument is used
      to set the default, use ellipsis (``...``) to indicate the field is required
    :param default_factory: callable that will be called when a default value is needed for this field
      If both `default` and `default_factory` are set, an error is raised.
    :param alias: the public name of the field
    :param title: can be any string, used in the schema
    :param description: can be any string, used in the schema
    :param const: this field is required and *must* take its default value
    :param gt: only applies to numbers, requires the field to be ""greater than"". The schema
      will have an ``exclusiveMinimum`` validation keyword
    :param ge: only applies to numbers, requires the field to be ""greater than or equal to"". The
      schema will have a ``minimum`` validation keyword
    :param lt: only applies to numbers, requires the field to be ""less than"". The schema
      will have an ``exclusiveMaximum`` validation keyword
    :param le: only applies to numbers, requires the field to be ""less than or equal to"". The
      schema will have a ``maximum`` validation keyword
    :param multiple_of: only applies to numbers, requires the field to be ""a multiple of"". The
      schema will have a ``multipleOf`` validation keyword
    :param min_length: only applies to strings, requires the field to have a minimum length. The
      schema will have a ``minLength`` validation keyword
    :param max_length: only applies to strings, requires the field to have a maximum length. The
      schema will have a ``maxLength`` validation keyword
    :param regex: only applies to strings, requires the field to match against a regular expression
      pattern string. The schema will have a ``pattern`` validation keyword
    :param **extra: any additional keyword arguments will be added as is to the schema
    """"""
    return FieldInfo(
        default,
        default_factory=default_factory,
        alias=alias,
        title=title,
        description=description,
        const=const,
        gt=gt,
        ge=ge,
        lt=lt,
        le=le,
        multiple_of=multiple_of,
        min_items=min_items,
        max_items=max_items,
        min_length=min_length,
        max_length=max_length,
        regex=regex,
        **extra,
    )
","def Field(
    default: Any = Undefined,
    *,
    default_factory: Optional[Callable[[], Any]] = None,
    alias: str = None,
    title: str = None,
    description: str = None,
    const: bool = None,
    gt: float = None,
    ge: float = None,
    lt: float = None,
    le: float = None,
    multiple_of: float = None,
    min_items: int = None,
    max_items: int = None,
    min_length: int = None,
    max_length: int = None,
    regex: str = None,
    **extra: Any,
) -> Any:
    """"""
    Used to provide extra information about a field, either for the model schema or complex validation. Some arguments
    apply only to number fields (``int``, ``float``, ``Decimal``) and some apply only to ``str``.

    :param default: since this is replacing the field’s default, its first argument is used
      to set the default, use ellipsis (``...``) to indicate the field is required
    :param default_factory: callable that will be called when a default value is needed for this field
      If both `default` and `default_factory` are set, an error is raised.
    :param alias: the public name of the field
    :param title: can be any string, used in the schema
    :param description: can be any string, used in the schema
    :param const: this field is required and *must* take its default value
    :param gt: only applies to numbers, requires the field to be ""greater than"". The schema
      will have an ``exclusiveMinimum`` validation keyword
    :param ge: only applies to numbers, requires the field to be ""greater than or equal to"". The
      schema will have a ``minimum`` validation keyword
    :param lt: only applies to numbers, requires the field to be ""less than"". The schema
      will have an ``exclusiveMaximum`` validation keyword
    :param le: only applies to numbers, requires the field to be ""less than or equal to"". The
      schema will have a ``maximum`` validation keyword
    :param multiple_of: only applies to numbers, requires the field to be ""a multiple of"". The
      schema will have a ``multipleOf`` validation keyword
    :param min_length: only applies to strings, requires the field to have a minimum length. The
      schema will have a ``minLength`` validation keyword
    :param max_length: only applies to strings, requires the field to have a maximum length. The
      schema will have a ``maxLength`` validation keyword
    :param regex: only applies to strings, requires the field to match against a regular expression
      pattern string. The schema will have a ``pattern`` validation keyword
    :param **extra: any additional keyword arguments will be added as is to the schema
    """"""
    return FieldInfo(
        default,
        default_factory=default_factory,
        alias=alias,
        title=title,
        description=description,
        const=const,
        gt=gt,
        ge=ge,
        lt=lt,
        le=le,
        multiple_of=multiple_of,
        min_items=min_items,
        max_items=max_items,
        min_length=min_length,
        max_length=max_length,
        regex=regex,
        **extra,
    )
"
12214,"def execute_config(args, parser):
    stdout_write = getLogger(""conda.stdout"").info
    stderr_write = getLogger(""conda.stderr"").info
    json_warnings = []
    json_get = {}

    if args.show_sources:
        if context.json:
            stdout_write(json.dumps(
                context.collect_all(), sort_keys=True, indent=2, separators=(',', ': '),
                cls=EntityEncoder
            ))
        else:
            lines = []
            for source, reprs in context.collect_all().items():
                lines.append(""==> %s <=="" % source)
                lines.extend(format_dict(reprs))
                lines.append('')
            stdout_write('\n'.join(lines))
        return

    if args.show is not None:
        if args.show:
            paramater_names = args.show
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..common.io import dashlist
                raise ArgumentError(""Invalid configuration parameters: %s"" % dashlist(not_params))
        else:
            paramater_names = context.list_parameters()

        d = dict((key, getattr(context, key)) for key in paramater_names)
        if context.json:
            stdout_write(json.dumps(
                d, sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
            ))
        else:
            # Add in custom formatting
            if 'custom_channels' in d:
                d['custom_channels'] = {
                    channel.name: ""%s://%s"" % (channel.scheme, channel.location)
                    for channel in d['custom_channels'].values()
                }
            if 'custom_multichannels' in d:
                from ..common.io import dashlist
                d['custom_multichannels'] = {
                    multichannel_name: dashlist(channels, indent=4)
                    for multichannel_name, channels in d['custom_multichannels'].items()
                }

            stdout_write('\n'.join(format_dict(d)))
        context.validate_configuration()
        return

    if args.describe is not None:
        if args.describe:
            paramater_names = args.describe
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..common.io import dashlist
                raise ArgumentError(""Invalid configuration parameters: %s"" % dashlist(not_params))
            if context.json:
                stdout_write(json.dumps(
                    [context.describe_parameter(name) for name in paramater_names],
                    sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
                ))
            else:
                builder = []
                builder.extend(concat(parameter_description_builder(name)
                                      for name in paramater_names))
                stdout_write('\n'.join(builder))
        else:
            if context.json:
                skip_categories = ('CLI-only', 'Hidden and Undocumented')
                paramater_names = sorted(concat(
                    parameter_names for category, parameter_names in context.category_map.items()
                    if category not in skip_categories
                ))
                stdout_write(json.dumps(
                    [context.describe_parameter(name) for name in paramater_names],
                    sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
                ))
            else:
                stdout_write(describe_all_parameters())
        return

    if args.validate:
        context.validate_all()
        return

    if args.system:
        rc_path = sys_rc_path
    elif args.env:
        if 'CONDA_PREFIX' in os.environ:
            rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')
        else:
            rc_path = user_rc_path
    elif args.file:
        rc_path = args.file
    else:
        rc_path = user_rc_path

    if args.write_default:
        if isfile(rc_path):
            with open(rc_path) as fh:
                data = fh.read().strip()
            if data:
                raise CondaError(""The file '%s' ""
                                 ""already contains configuration information.\n""
                                 ""Remove the file to proceed.\n""
                                 ""Use `conda config --describe` to display default configuration.""
                                 % rc_path)

        with open(rc_path, 'w') as fh:
            fh.write(describe_all_parameters())
        return

    # read existing condarc
    if os.path.exists(rc_path):
        with open(rc_path, 'r') as fh:
            # round trip load required because... we need to round trip
            rc_config = yaml_round_trip_load(fh) or {}
    elif os.path.exists(sys_rc_path):
        # In case the considered rc file doesn't exist, fall back to the system rc
        with open(sys_rc_path, 'r') as fh:
            rc_config = yaml_round_trip_load(fh) or {}
    else:
        rc_config = {}

    grouped_paramaters = groupby(lambda p: context.describe_parameter(p)['parameter_type'],
                                 context.list_parameters())
    primitive_parameters = grouped_paramaters['primitive']
    sequence_parameters = grouped_paramaters['sequence']
    map_parameters = grouped_paramaters['map']
    all_parameters = primitive_parameters + sequence_parameters + map_parameters

    # Get
    if args.get is not None:
        context.validate_all()
        if args.get == []:
            args.get = sorted(rc_config.keys())

        value_not_found = object()
        for key in args.get:
            key_parts = key.split(""."")

            if key_parts[0] not in all_parameters:
                message = ""unknown key %s"" % key_parts[0]
                if not context.json:
                    stderr_write(message)
                else:
                    json_warnings.append(message)
                continue

            remaining_rc_config = rc_config
            for k in key_parts:
                if k in remaining_rc_config:
                    remaining_rc_config = remaining_rc_config[k]
                else:
                    remaining_rc_config = value_not_found
                    break

            if remaining_rc_config is value_not_found:
                pass
            elif context.json:
                json_get[key] = remaining_rc_config
            else:
                print_config_item(key, remaining_rc_config)

    if args.stdin:
        content = timeout(5, sys.stdin.read)
        if not content:
            return
        try:
            # round trip load required because... we need to round trip
            parsed = yaml_round_trip_load(content)
            rc_config.update(parsed)
        except Exception:  # pragma: no cover
            from ..exceptions import ParseError
            raise ParseError(""invalid yaml content:\n%s"" % content)

    # prepend, append, add
    for arg, prepend in zip((args.prepend, args.append), (True, False)):
        for key, item in arg:
            key, subkey = key.split('.', 1) if '.' in key else (key, None)
            if key == 'channels' and key not in rc_config:
                rc_config[key] = ['defaults']
            if key in sequence_parameters:
                arglist = rc_config.setdefault(key, [])
            elif key in map_parameters:
                arglist = rc_config.setdefault(key, {}).setdefault(subkey, [])
            else:
                from ..exceptions import CondaValueError
                raise CondaValueError(""Key '%s' is not a known sequence parameter."" % key)
            if not (isinstance(arglist, Sequence) and not
                    isinstance(arglist, str)):
                from ..exceptions import CouldntParseError
                bad = rc_config[key].__class__.__name__
                raise CouldntParseError(""key %r should be a list, not %s."" % (key, bad))
            if item in arglist:
                message_key = key + ""."" + subkey if subkey is not None else key
                # Right now, all list keys should not contain duplicates
                message = ""Warning: '%s' already in '%s' list, moving to the %s"" % (
                    item, message_key, ""top"" if prepend else ""bottom"")
                if subkey is None:
                    arglist = rc_config[key] = [p for p in arglist if p != item]
                else:
                    arglist = rc_config[key][subkey] = [p for p in arglist if p != item]
                if not context.json:
                    stderr_write(message)
                else:
                    json_warnings.append(message)
            arglist.insert(0 if prepend else len(arglist), item)

    # Set
    for key, item in args.set:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key in primitive_parameters:
            value = context.typify_parameter(key, item, ""--set parameter"")
            rc_config[key] = value
        elif key in map_parameters:
            argmap = rc_config.setdefault(key, {})
            argmap[subkey] = item
        else:
            from ..exceptions import CondaValueError
            raise CondaValueError(""Key '%s' is not a known primitive parameter."" % key)

    # Remove
    for key, item in args.remove:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            if key != 'channels':
                from ..exceptions import CondaKeyError
                raise CondaKeyError(key, ""key %r is not in the config file"" % key)
            rc_config[key] = ['defaults']
        if item not in rc_config[key]:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(key, ""%r is not in the %r key of the config file"" %
                                (item, key))
        rc_config[key] = [i for i in rc_config[key] if i != item]

    # Remove Key
    for key, in args.remove_key:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(key, ""key %r is not in the config file"" %
                                key)
        del rc_config[key]

    # config.rc_keys
    if not args.get:

        # Add representers for enums.
        # Because a representer cannot be added for the base Enum class (it must be added for
        # each specific Enum subclass - and because of import rules), I don't know of a better
        # location to do this.
        def enum_representer(dumper, data):
            return dumper.represent_str(str(data))

        yaml.representer.RoundTripRepresenter.add_representer(SafetyChecks, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(PathConflict, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(DepsModifier, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(UpdateModifier, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(ChannelPriority, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(SatSolverChoice, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(
            ExperimentalSolverChoice, enum_representer
        )

        try:
            with open(rc_path, 'w') as rc:
                rc.write(yaml_round_trip_dump(rc_config))
        except (IOError, OSError) as e:
            raise CondaError('Cannot write to condarc file at %s\n'
                             'Caused by %r' % (rc_path, e))

    if context.json:
        from .common import stdout_json_success
        stdout_json_success(
            rc_path=rc_path,
            warnings=json_warnings,
            get=json_get
        )
    return
","def execute_config(args, parser):
    stdout_write = getLogger(""conda.stdout"").info
    stderr_write = getLogger(""conda.stderr"").info
    json_warnings = []
    json_get = {}

    if args.show_sources:
        if context.json:
            stdout_write(json.dumps(
                context.collect_all(), sort_keys=True, indent=2, separators=(',', ': '),
                cls=EntityEncoder
            ))
        else:
            lines = []
            for source, reprs in context.collect_all().items():
                lines.append(""==> %s <=="" % source)
                lines.extend(format_dict(reprs))
                lines.append('')
            stdout_write('\n'.join(lines))
        return

    if args.show is not None:
        if args.show:
            paramater_names = args.show
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..common.io import dashlist
                raise ArgumentError(""Invalid configuration parameters: %s"" % dashlist(not_params))
        else:
            paramater_names = context.list_parameters()

        d = {key: getattr(context, key) for key in paramater_names}
        if context.json:
            stdout_write(json.dumps(
                d, sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
            ))
        else:
            # Add in custom formatting
            if 'custom_channels' in d:
                d['custom_channels'] = {
                    channel.name: ""%s://%s"" % (channel.scheme, channel.location)
                    for channel in d['custom_channels'].values()
                }
            if 'custom_multichannels' in d:
                from ..common.io import dashlist
                d['custom_multichannels'] = {
                    multichannel_name: dashlist(channels, indent=4)
                    for multichannel_name, channels in d['custom_multichannels'].items()
                }

            stdout_write('\n'.join(format_dict(d)))
        context.validate_configuration()
        return

    if args.describe is not None:
        if args.describe:
            paramater_names = args.describe
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..common.io import dashlist
                raise ArgumentError(""Invalid configuration parameters: %s"" % dashlist(not_params))
            if context.json:
                stdout_write(json.dumps(
                    [context.describe_parameter(name) for name in paramater_names],
                    sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
                ))
            else:
                builder = []
                builder.extend(concat(parameter_description_builder(name)
                                      for name in paramater_names))
                stdout_write('\n'.join(builder))
        else:
            if context.json:
                skip_categories = ('CLI-only', 'Hidden and Undocumented')
                paramater_names = sorted(concat(
                    parameter_names for category, parameter_names in context.category_map.items()
                    if category not in skip_categories
                ))
                stdout_write(json.dumps(
                    [context.describe_parameter(name) for name in paramater_names],
                    sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
                ))
            else:
                stdout_write(describe_all_parameters())
        return

    if args.validate:
        context.validate_all()
        return

    if args.system:
        rc_path = sys_rc_path
    elif args.env:
        if 'CONDA_PREFIX' in os.environ:
            rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')
        else:
            rc_path = user_rc_path
    elif args.file:
        rc_path = args.file
    else:
        rc_path = user_rc_path

    if args.write_default:
        if isfile(rc_path):
            with open(rc_path) as fh:
                data = fh.read().strip()
            if data:
                raise CondaError(""The file '%s' ""
                                 ""already contains configuration information.\n""
                                 ""Remove the file to proceed.\n""
                                 ""Use `conda config --describe` to display default configuration.""
                                 % rc_path)

        with open(rc_path, 'w') as fh:
            fh.write(describe_all_parameters())
        return

    # read existing condarc
    if os.path.exists(rc_path):
        with open(rc_path, 'r') as fh:
            # round trip load required because... we need to round trip
            rc_config = yaml_round_trip_load(fh) or {}
    elif os.path.exists(sys_rc_path):
        # In case the considered rc file doesn't exist, fall back to the system rc
        with open(sys_rc_path, 'r') as fh:
            rc_config = yaml_round_trip_load(fh) or {}
    else:
        rc_config = {}

    grouped_paramaters = groupby(lambda p: context.describe_parameter(p)['parameter_type'],
                                 context.list_parameters())
    primitive_parameters = grouped_paramaters['primitive']
    sequence_parameters = grouped_paramaters['sequence']
    map_parameters = grouped_paramaters['map']
    all_parameters = primitive_parameters + sequence_parameters + map_parameters

    # Get
    if args.get is not None:
        context.validate_all()
        if args.get == []:
            args.get = sorted(rc_config.keys())

        value_not_found = object()
        for key in args.get:
            key_parts = key.split(""."")

            if key_parts[0] not in all_parameters:
                message = ""unknown key %s"" % key_parts[0]
                if not context.json:
                    stderr_write(message)
                else:
                    json_warnings.append(message)
                continue

            remaining_rc_config = rc_config
            for k in key_parts:
                if k in remaining_rc_config:
                    remaining_rc_config = remaining_rc_config[k]
                else:
                    remaining_rc_config = value_not_found
                    break

            if remaining_rc_config is value_not_found:
                pass
            elif context.json:
                json_get[key] = remaining_rc_config
            else:
                print_config_item(key, remaining_rc_config)

    if args.stdin:
        content = timeout(5, sys.stdin.read)
        if not content:
            return
        try:
            # round trip load required because... we need to round trip
            parsed = yaml_round_trip_load(content)
            rc_config.update(parsed)
        except Exception:  # pragma: no cover
            from ..exceptions import ParseError
            raise ParseError(""invalid yaml content:\n%s"" % content)

    # prepend, append, add
    for arg, prepend in zip((args.prepend, args.append), (True, False)):
        for key, item in arg:
            key, subkey = key.split('.', 1) if '.' in key else (key, None)
            if key == 'channels' and key not in rc_config:
                rc_config[key] = ['defaults']
            if key in sequence_parameters:
                arglist = rc_config.setdefault(key, [])
            elif key in map_parameters:
                arglist = rc_config.setdefault(key, {}).setdefault(subkey, [])
            else:
                from ..exceptions import CondaValueError
                raise CondaValueError(""Key '%s' is not a known sequence parameter."" % key)
            if not (isinstance(arglist, Sequence) and not
                    isinstance(arglist, str)):
                from ..exceptions import CouldntParseError
                bad = rc_config[key].__class__.__name__
                raise CouldntParseError(""key %r should be a list, not %s."" % (key, bad))
            if item in arglist:
                message_key = key + ""."" + subkey if subkey is not None else key
                # Right now, all list keys should not contain duplicates
                message = ""Warning: '%s' already in '%s' list, moving to the %s"" % (
                    item, message_key, ""top"" if prepend else ""bottom"")
                if subkey is None:
                    arglist = rc_config[key] = [p for p in arglist if p != item]
                else:
                    arglist = rc_config[key][subkey] = [p for p in arglist if p != item]
                if not context.json:
                    stderr_write(message)
                else:
                    json_warnings.append(message)
            arglist.insert(0 if prepend else len(arglist), item)

    # Set
    for key, item in args.set:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key in primitive_parameters:
            value = context.typify_parameter(key, item, ""--set parameter"")
            rc_config[key] = value
        elif key in map_parameters:
            argmap = rc_config.setdefault(key, {})
            argmap[subkey] = item
        else:
            from ..exceptions import CondaValueError
            raise CondaValueError(""Key '%s' is not a known primitive parameter."" % key)

    # Remove
    for key, item in args.remove:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            if key != 'channels':
                from ..exceptions import CondaKeyError
                raise CondaKeyError(key, ""key %r is not in the config file"" % key)
            rc_config[key] = ['defaults']
        if item not in rc_config[key]:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(key, ""%r is not in the %r key of the config file"" %
                                (item, key))
        rc_config[key] = [i for i in rc_config[key] if i != item]

    # Remove Key
    for key, in args.remove_key:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(key, ""key %r is not in the config file"" %
                                key)
        del rc_config[key]

    # config.rc_keys
    if not args.get:

        # Add representers for enums.
        # Because a representer cannot be added for the base Enum class (it must be added for
        # each specific Enum subclass - and because of import rules), I don't know of a better
        # location to do this.
        def enum_representer(dumper, data):
            return dumper.represent_str(str(data))

        yaml.representer.RoundTripRepresenter.add_representer(SafetyChecks, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(PathConflict, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(DepsModifier, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(UpdateModifier, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(ChannelPriority, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(SatSolverChoice, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(
            ExperimentalSolverChoice, enum_representer
        )

        try:
            with open(rc_path, 'w') as rc:
                rc.write(yaml_round_trip_dump(rc_config))
        except (IOError, OSError) as e:
            raise CondaError('Cannot write to condarc file at %s\n'
                             'Caused by %r' % (rc_path, e))

    if context.json:
        from .common import stdout_json_success
        stdout_json_success(
            rc_path=rc_path,
            warnings=json_warnings,
            get=json_get
        )
    return
"
752,"def test_PeaksAndMetricsDirectionGetter():

    class SillyModel(object):
        def fit(self, data, mask=None):
            return SillyFit(self)

    class SillyFit(object):

        def __init__(self, model):
            self.model = model

        def odf(self, sphere):
            odf = np.zeros(sphere.theta.shape)
            r = np.random.randint(0, len(odf))
            odf[r] = 1
            return odf

    def get_direction(dg, point, dir):
        newdir = dir.copy()
        state = dg.get_direction(point, newdir)
        return (state, np.array(newdir))

    data = np.random.random((3, 4, 5, 2))
    peaks = peaks_from_model(SillyModel(), data, default_sphere,
                             relative_peak_threshold=.5,
                             min_separation_angle=25)
    peaks._initialize()

    up = np.zeros(3)
    up[2] = 1.
    down = -up

    for i in range(3-1):
        for j in range(4-1):
            for k in range(5-1):
                point = np.array([i, j, k], dtype=float)

                # Test that the angle threshold rejects points
                peaks.ang_thr = 0.
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 1)

                # Here we leverage the fact that we know Hemispheres project
                # all their vertices into the z >= 0 half of the sphere.
                peaks.ang_thr = 90.
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 0)
                expected_dir = peaks.peak_dirs[i, j, k, 0]
                npt.assert_array_almost_equal(nd, expected_dir)
                state, nd = get_direction(peaks, point, down)
                npt.assert_array_almost_equal(nd, -expected_dir)

                # Check that we can get directions at non-integer points
                point += np.random.random(3)
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 0)

                # Check that points are rounded to get initial direction
                point -= .5
                id = peaks.initial_direction(point)
                # id should be a (1, 3) array
                npt.assert_array_almost_equal(id, [expected_dir])

    peaks1 = peaks_from_model(SillyModel(), data, default_sphere,
                              relative_peak_threshold=.5,
                              min_separation_angle=25,
                              npeaks=1)
    peaks1._initialize()
    point = np.array([1, 1, 1], dtype=float)

    # id should have one direction
    npt.assert_array_almost_equal(len(peaks1.initial_direction(point)), 1)
    npt.assert_array_almost_equal(len(peaks.initial_direction(point)), 1)

","def test_PeaksAndMetricsDirectionGetter():

    class SillyModel(object):
        def fit(self, data, mask=None):
            return SillyFit(self)

    class SillyFit(object):

        def __init__(self, model):
            self.model = model

        def odf(self, sphere):
            odf = np.zeros(sphere.theta.shape)
            r = np.random.randint(0, len(odf))
            odf[r] = 1
            return odf

    def get_direction(dg, point, dir):
        newdir = dir.copy()
        state = dg.get_direction(point, newdir)
        return (state, np.array(newdir))

    data = np.random.random((3, 4, 5, 2))
    peaks = peaks_from_model(SillyModel(), data, default_sphere,
                             relative_peak_threshold=.5,
                             min_separation_angle=25)
    peaks._initialize()

    up = np.zeros(3)
    up[2] = 1.
    down = -up

    for i in range(3-1):
        for j in range(4-1):
            for k in range(5-1):
                point = np.array([i, j, k], dtype=float)

                # Test that the angle threshold rejects points
                peaks.ang_thr = 0.
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 1)

                # Here we leverage the fact that we know Hemispheres project
                # all their vertices into the z >= 0 half of the sphere.
                peaks.ang_thr = 90.
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 0)
                expected_dir = peaks.peak_dirs[i, j, k, 0]
                npt.assert_array_almost_equal(nd, expected_dir)
                state, nd = get_direction(peaks, point, down)
                npt.assert_array_almost_equal(nd, -expected_dir)

                # Check that we can get directions at non-integer points
                point += np.random.random(3)
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 0)

                # Check that points are rounded to get initial direction
                point -= .5
                id = peaks.initial_direction(point)
                # id should be a (1, 3) array
                npt.assert_array_almost_equal(id, [expected_dir])

    peaks1 = peaks_from_model(SillyModel(), data, default_sphere,
                              relative_peak_threshold=.5,
                              min_separation_angle=25,
                              npeaks=1)
    peaks1._initialize()
    point = np.array([1, 1, 1], dtype=float)

    # it should have one direction
    npt.assert_array_almost_equal(len(peaks1.initial_direction(point)), 1)
    npt.assert_array_almost_equal(len(peaks.initial_direction(point)), 1)

"
58532,"def wait_for_gpu(gpu_id=None,
                 target_util=0.01,
                 retry=20,
                 gpu_memory_limit=None):
    """"""Checks if a given GPU has freed memory.

    Requires ``gputil`` to be installed: ``pip install gputil``.

    Args:
        gpu_id (Optional[Union[int, str]]): GPU id or uuid to check.
            Must be found within GPUtil.getGPUs(). If none, resorts to
            the first item returned from `ray.get_gpu_ids()`.
        target_util (float): The utilization threshold to reach to unblock.
            Set this to 0 to block until the GPU is completely free.
        retry (int): Number of times to check GPU limit. Sleeps 5
            seconds between checks.
        gpu_memory_limit (float): Deprecated.

    Returns:
        bool: True if free.

    Raises:
        RuntimeError: If GPUtil is not found, if no GPUs are detected
            or if the check fails.

    Example:

    .. code-block:: python

        def tune_func(config):
            tune.util.wait_for_gpu()
            train()

        tune.run(tune_func, resources_per_trial={""GPU"": 1}, num_samples=10)
    """"""
    if gpu_memory_limit:
        raise ValueError(""'gpu_memory_limit' is deprecated. ""
                         ""Use 'target_util' instead."")
    if GPUtil is None:
        raise RuntimeError(
            ""GPUtil must be installed if calling `wait_for_gpu`."")

    if gpu_id is None:
        gpu_id_list = ray.get_gpu_ids()
        if not gpu_id_list:
            raise RuntimeError(""No GPU ids found from `ray.get_gpu_ids()`. ""
                               ""Did you set Tune resources correctly?"")
        gpu_id = gpu_id_list[0]

    gpu_attr = 'id'
    if isinstance(gpu_id, str):
        try:
            # GPU ID returned from `ray.get_gpu_ids()` is a str representation
            # of the int GPU ID, so check for this case
            gpu_id = int(gpu_id)
        except ValueError:
            # Could not coerce gpu_id to int, so assume UUID and compare against `uuid` attribute
            # e.g., 'GPU-04546190-b68d-65ac-101b-035f8faed77d'
            gpu_attr = 'uuid'

    def gpu_id_fn(g):
        # Returns either `g.id` or `g.uuid` depending on the format of the input `gpu_id`
        return getattr(g, gpu_attr)

    gpu_ids = {gpu_id_fn(g) for g in GPUtil.getGPUS()}
    if gpu_id not in gpu_ids:
        raise ValueError(
            f""{gpu_id} not found in set of available GPUs: {gpu_ids}. ""
            ""`wait_for_gpu` takes either GPU ordinal ID (e.g., '0') or ""
            ""UUID (e.g., 'GPU-04546190-b68d-65ac-101b-035f8faed77d')."")

    for i in range(int(retry)):
        gpu_object = next(g for g in GPUtil.getGPUs() if gpu_id_fn(g) == gpu_id)
        if gpu_object.memoryUtil > target_util:
            logger.info(f""Waiting for GPU util to reach {target_util}. ""
                        f""Util: {gpu_object.memoryUtil:0.3f}"")
            time.sleep(5)
        else:
            return True
    raise RuntimeError(""GPU memory was not freed."")

","def wait_for_gpu(gpu_id=None,
                 target_util=0.01,
                 retry=20,
                 gpu_memory_limit=None):
    """"""Checks if a given GPU has freed memory.

    Requires ``gputil`` to be installed: ``pip install gputil``.

    Args:
        gpu_id (Optional[Union[int, str]]): GPU id or uuid to check.
            Must be found within GPUtil.getGPUs(). If none, resorts to
            the first item returned from `ray.get_gpu_ids()`.
        target_util (float): The utilization threshold to reach to unblock.
            Set this to 0 to block until the GPU is completely free.
        retry (int): Number of times to check GPU limit. Sleeps 5
            seconds between checks.
        gpu_memory_limit (float): Deprecated.

    Returns:
        bool: True if free.

    Raises:
        RuntimeError: If GPUtil is not found, if no GPUs are detected
            or if the check fails.

    Example:

    .. code-block:: python

        def tune_func(config):
            tune.util.wait_for_gpu()
            train()

        tune.run(tune_func, resources_per_trial={""GPU"": 1}, num_samples=10)
    """"""
    if gpu_memory_limit:
        raise ValueError(""'gpu_memory_limit' is deprecated. ""
                         ""Use 'target_util' instead."")
    if GPUtil is None:
        raise RuntimeError(
            ""GPUtil must be installed if calling `wait_for_gpu`."")

    if gpu_id is None:
        gpu_id_list = ray.get_gpu_ids()
        if not gpu_id_list:
            raise RuntimeError(""No GPU ids found from `ray.get_gpu_ids()`. ""
                               ""Did you set Tune resources correctly?"")
        gpu_id = gpu_id_list[0]

    gpu_attr = 'id'
    if isinstance(gpu_id, str):
        if gpu_id.isdigit():
            gpu_id = int(gpu_id)
        else:
            gpu_attr = ""attr""

    def gpu_id_fn(g):
        # Returns either `g.id` or `g.uuid` depending on the format of the input `gpu_id`
        return getattr(g, gpu_attr)

    gpu_ids = {gpu_id_fn(g) for g in GPUtil.getGPUs()}
    if gpu_id not in gpu_ids:
        raise ValueError(
            f""{gpu_id} not found in set of available GPUs: {gpu_ids}. ""
            ""`wait_for_gpu` takes either GPU ordinal ID (e.g., '0') or ""
            ""UUID (e.g., 'GPU-04546190-b68d-65ac-101b-035f8faed77d')."")

    for i in range(int(retry)):
        gpu_object = next(g for g in GPUtil.getGPUs() if gpu_id_fn(g) == gpu_id)
        if gpu_object.memoryUtil > target_util:
            logger.info(f""Waiting for GPU util to reach {target_util}. ""
                        f""Util: {gpu_object.memoryUtil:0.3f}"")
            time.sleep(5)
        else:
            return True
    raise RuntimeError(""GPU memory was not freed."")

"
39851,"def get_package_version():
    return __version__ + ""."" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__

","def get_package_version():
    return __version__ + ""."" + os.environ['HOROVOD_LOCAL_VERSION'] if os.environ.get('HOROVOD_LOCAL_VERSION') else __version__

"
8860,"def add_common_arguments(parser):
    """"""Add common and configuration-related arguments to a ``parser``.

    :param parser: Argument parser (or subparser)
    :type parser: argparse.ArgumentParser

    This functions adds the common arguments for Sopel's command line tools.
    It adds the following arguments:

    * ``-c``/``--config``: the name of the Sopel config, or its absolute path
    * ``--config-dir``: the directory to scan for config files

    This can be used on an argument parser, or an argument subparser, to handle
    these cases::

        [sopel-command] -c [filename]
        [sopel-command] [action] -c [filename]
        [sopel-command] --config-dir [directory] -c [name]

    Then, when the parser parses the command line arguments, it will expose
    ``config`` and ``configdir`` options that can be used to find and load
    Sopel's settings.

    The default value for ``config`` is either the value of the environement
    variable ``SOPEL_CONFIG``, or the string ``default``.

    .. seealso::

        The :func:`sopel.cli.utils.load_settings` function uses an ``options``
        object from a parser configured with such arguments.

    """"""
    parser.add_argument(
        '-c', '--config',
        default=os.environ.get('SOPEL_CONFIG') or 'default',
        metavar='filename',
        dest='config',
        help=inspect.cleandoc(""""""
            Use a specific configuration file.
            A config name can be given and the configuration file will be
            found in Sopel's homedir (defaults to ``~/.sopel/default.cfg``).
            An absolute pathname can be provided instead to use an
            arbitrary location.
            When the ``SOPEL_CONFIG`` environement variable is set and not
            empty, it is used as the default value.
        """"""))
    parser.add_argument(
        '--config-dir',
        default=config.DEFAULT_HOMEDIR,
        dest='configdir',
        help='Look for configuration files in this directory.')

","def add_common_arguments(parser):
    """"""Add common and configuration-related arguments to a ``parser``.

    :param parser: Argument parser (or subparser)
    :type parser: argparse.ArgumentParser

    This function adds the common arguments for Sopel's command line tools.
    It adds the following arguments:

    * ``-c``/``--config``: the name of the Sopel config, or its absolute path
    * ``--config-dir``: the directory to scan for config files

    This can be used on an argument parser, or an argument subparser, to handle
    these cases::

        [sopel-command] -c [filename]
        [sopel-command] [action] -c [filename]
        [sopel-command] --config-dir [directory] -c [name]

    Then, when the parser parses the command line arguments, it will expose
    ``config`` and ``configdir`` options that can be used to find and load
    Sopel's settings.

    The default value for ``config`` is either the value of the environment
    variable ``SOPEL_CONFIG``, or the string ``default``.

    .. seealso::

        The :func:`sopel.cli.utils.load_settings` function uses an ``options``
        object from a parser configured with such arguments.
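
    A minimal usage sketch; the config name ``mybot`` below is only a
    hypothetical example::

        import argparse

        parser = argparse.ArgumentParser()
        add_common_arguments(parser)
        options = parser.parse_args(['-c', 'mybot'])
        # options.config == 'mybot'
        # options.configdir == config.DEFAULT_HOMEDIR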

    """"""
    parser.add_argument(
        '-c', '--config',
        default=os.environ.get('SOPEL_CONFIG') or 'default',
        metavar='filename',
        dest='config',
        help=inspect.cleandoc(""""""
            Use a specific configuration file.
            A config name can be given and the configuration file will be
            found in Sopel's homedir (defaults to ``~/.sopel/default.cfg``).
            An absolute pathname can be provided instead to use an
            arbitrary location.
            When the ``SOPEL_CONFIG`` environment variable is set and not
            empty, it is used as the default value.
        """"""))
    parser.add_argument(
        '--config-dir',
        default=config.DEFAULT_HOMEDIR,
        dest='configdir',
        help='Look for configuration files in this directory.')

"
43824,"def quantum_monte_carlo(fn, wires, target_wire, estimation_wires):
    r""""""Provides the circuit to perform the
    `quantum Monte Carlo estimation `__ algorithm.

    The input ``fn`` should be the quantum circuit corresponding to the :math:`\mathcal{F}` unitary
    in the paper above that encodes the probability distribution and random variable onto ``wires``
    so that measurement of the ``target_wire`` provides the expectation value to be estimated.
    The quantum Monte Carlo algorithm then estimates the expectation value using quantum phase
    estimation (check out :class:`~.QuantumPhaseEstimation` for more details), using the
    ``estimation_wires``.

    .. note::

        A complementary approach for quantum Monte Carlo is available with the
        :class:`~.QuantumMonteCarlo` template.

        The ``quantum_monte_carlo`` transform is intended for
        use when you already have the circuit for performing :math:`\mathcal{F}` set up, and is
        compatible with resource estimation and potential hardware implementation. The
        :class:`~.QuantumMonteCarlo` template is unitary-based and is only compatible with
        simulators, but may perform faster and is suited to quick prototyping.

    Args:
        fn (Callable): a quantum function that applies quantum operations according to the
            :math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
        wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
        target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
            contained within ``wires``.
        estimation_wires (Union[Wires, Sequence[int], or int]): the wires used for phase estimation

    Returns:
        function: The circuit for quantum Monte Carlo estimation

    Raises:
        ValueError: if ``wires`` and ``estimation_wires`` share a common wire

    .. UsageDetails::

        Consider an input quantum circuit ``fn`` that performs the unitary

        .. math::

            \mathcal{F} = \mathcal{R} \mathcal{A}.

        .. figure:: ../../_static/ops/f.svg
            :align: center
            :width: 15%
            :target: javascript:void(0);

        Here, the unitary :math:`\mathcal{A}` prepares a probability distribution :math:`p(i)` of
        dimension :math:`M = 2^{m}` over :math:`m \geq 1` qubits:

        .. math::

            \mathcal{A}|0\rangle^{\otimes m} = \sum_{i \in X} p(i) |i\rangle

        where :math:`X = \{0, 1, \ldots, M - 1\}` and :math:`|i\rangle` is the basis state
        corresponding to :math:`i`. The :math:`\mathcal{R}` unitary imprints the
        result of a function :math:`f: X \rightarrow [0, 1]` onto an ancilla qubit:

        .. math::

            \mathcal{R}|i\rangle |0\rangle = |i\rangle \left(\sqrt{1 - f(i)} |0\rangle + \sqrt{f(i)}|1\rangle\right).

        Following `this `__ paper,
        it can be seen that the probability of measuring the state :math:`|1\rangle` in the final
        qubit is

        .. math::

            \mu = \sum_{i \in X} p(i) f(i).

        However, it is possible to measure :math:`\mu` more efficiently using quantum Monte Carlo
        estimation. This function transforms an input quantum circuit ``fn`` that performs the
        unitary :math:`\mathcal{F}` to a larger circuit for measuring :math:`\mu` using the quantum
        Monte Carlo algorithm.

        .. figure:: ../../_static/ops/qmc.svg
            :align: center
            :width: 60%
            :target: javascript:void(0);

        The algorithm proceeds as follows:

        #. The probability distribution :math:`p(i)` is encoded using a unitary :math:`\mathcal{A}`
           applied to the first :math:`m` qubits specified by ``wires``.
        #. The function :math:`f(i)` is encoded onto the ``target_wire`` using a unitary
           :math:`\mathcal{R}`.
        #. The unitary :math:`\mathcal{Q}` is defined with eigenvalues
           :math:`e^{\pm 2 \pi i \theta}` such that the phase :math:`\theta` encodes the expectation
           value through the equation :math:`\mu = (1 + \cos (\pi \theta)) / 2`. The circuit in
           steps 1 and 2 prepares an equal superposition over the two states corresponding to the
           eigenvalues :math:`e^{\pm 2 \pi i \theta}`.
        #. The circuit returned by this function is applied so that :math:`\pm\theta` can be
           estimated by finding the probabilities of the :math:`n` estimation wires. This in turn
           allows for the estimation of :math:`\mu`.

        Visit `Rebentrost et al. (2018)
        `__ for further details.
        In this algorithm, the number of applications :math:`N` of the :math:`\mathcal{Q}` unitary
        scales as :math:`2^{n}`. However, due to the use of quantum phase estimation, the error
        :math:`\epsilon` scales as :math:`\mathcal{O}(2^{-n})`. Hence,

        .. math::

            N = \mathcal{O}\left(\frac{1}{\epsilon}\right).

        This scaling can be compared to standard Monte Carlo estimation, where :math:`N` samples are
        generated from the probability distribution and the average over :math:`f` is taken. In that
        case,

        .. math::

            N =  \mathcal{O}\left(\frac{1}{\epsilon^{2}}\right).

        Hence, the quantum Monte Carlo algorithm has a quadratically improved time complexity with
        :math:`N`.

        **Example**

        Consider a standard normal distribution :math:`p(x)` and a function
        :math:`f(x) = \sin ^{2} (x)`. The expectation value of :math:`f(x)` is
        :math:`\int_{-\infty}^{\infty}f(x)p(x) \approx 0.432332`. This number can be approximated by
        discretizing the problem and using the quantum Monte Carlo algorithm.

        First, the problem is discretized:

        .. code-block:: python

            from scipy.stats import norm

            m = 5
            M = 2 ** m

            xmax = np.pi  # bound to region [-pi, pi]
            xs = np.linspace(-xmax, xmax, M)

            probs = np.array([norm().pdf(x) for x in xs])
            probs /= np.sum(probs)

            func = lambda i: np.sin(xs[i]) ** 2
            r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])

        The ``quantum_monte_carlo`` transform can then be used:

        .. code-block::

            from pennylane.templates.state_preparations.mottonen import (
                _uniform_rotation_dagger as r_unitary,
            )

            n = 6
            N = 2 ** n

            a_wires = range(m)
            wires = range(m + 1)
            target_wire = m
            estimation_wires = range(m + 1, n + m + 1)

            dev = qml.device(""default.qubit"", wires=(n + m + 1))

            def fn():
                qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=a_wires)
                r_unitary(qml.RY, r_rotations, control_wires=a_wires[::-1], target_wire=target_wire)

            @qml.qnode(dev)
            def qmc():
                qml.quantum_monte_carlo(fn, wires, target_wire, estimation_wires)()
                return qml.probs(estimation_wires)

            phase_estimated = np.argmax(qmc()[:int(N / 2)]) / N

        The estimated value can be retrieved using the formula :math:`\mu = (1-\cos(\pi \theta))/2`

        >>> (1 - np.cos(np.pi * phase_estimated)) / 2
        0.42663476277231915

        It is also possible to explore the resources required to perform the quantum Monte Carlo
        algorithm

        >>> qtape = qmc.qtape.expand(depth=1)
        >>> qtape.get_resources()
        {'RY': 14674,
         'CNOT': 15686,
         'PhaseShift': 1020,
         'RX': 510,
         'CZ': 126,
         'PauliX': 1260,
         'Toffoli': 2016,
         'SWAP': 3,
         'Hadamard': 6,
         'ControlledPhaseShift': 15}
    """"""
    wires = Wires(wires)
    target_wire = Wires(target_wire)
    estimation_wires = Wires(estimation_wires)

    if Wires.shared_wires([wires, estimation_wires]):
        raise ValueError(""No wires can be shared between the wires and estimation_wires registers"")

    @wraps(fn)
    def wrapper(*args, **kwargs):
        fn(*args, **kwargs)
        for i, control_wire in enumerate(estimation_wires):
            Hadamard(control_wire)

            # Find wires eligible to be used as helper wires
            work_wires = estimation_wires.toset() - {control_wire}
            n_reps = 2 ** (len(estimation_wires) - (i + 1))

            q = apply_controlled_Q(
                fn,
                wires=wires,
                target_wire=target_wire,
                control_wire=control_wire,
                work_wires=work_wires,
            )

            for _ in range(n_reps):
                q(*args, **kwargs)

        QFT(wires=estimation_wires).inv()

    return wrapper
","def quantum_monte_carlo(fn, wires, target_wire, estimation_wires):
    r""""""Provides the circuit to perform the
    `quantum Monte Carlo estimation `__ algorithm.

    The input ``fn`` should be the quantum circuit corresponding to the :math:`\mathcal{F}` unitary
    in the paper above that encodes the probability distribution and random variable onto ``wires``
    so that measurement of the ``target_wire`` provides the expectation value to be estimated.
    The quantum Monte Carlo algorithm then estimates the expectation value using quantum phase
    estimation (check out :class:`~.QuantumPhaseEstimation` for more details), using the
    ``estimation_wires``.

    .. note::

        A complementary approach for quantum Monte Carlo is available with the
        :class:`~.QuantumMonteCarlo` template.

        The ``quantum_monte_carlo`` transform is intended for
        use when you already have the circuit for performing :math:`\mathcal{F}` set up, and is
        compatible with resource estimation and potential hardware implementation. The
        :class:`~.QuantumMonteCarlo` template is unitary-based and is only compatible with
        simulators, but may perform faster and is suited to quick prototyping.

    Args:
        fn (Callable): a quantum function that applies quantum operations according to the
            :math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
        wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
        target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
            contained within ``wires``.
        estimation_wires (Union[Wires, Sequence[int], or int]): the wires used for phase estimation

    Returns:
        function: The circuit for quantum Monte Carlo estimation

    Raises:
        ValueError: if ``wires`` and ``estimation_wires`` share a common wire

    .. UsageDetails::

        Consider an input quantum circuit ``fn`` that performs the unitary

        .. math::

            \mathcal{F} = \mathcal{R} \mathcal{A}.

        .. figure:: ../../_static/ops/f.svg
            :align: center
            :width: 15%
            :target: javascript:void(0);

        Here, the unitary :math:`\mathcal{A}` prepares a probability distribution :math:`p(i)` of
        dimension :math:`M = 2^{m}` over :math:`m \geq 1` qubits:

        .. math::

            \mathcal{A}|0\rangle^{\otimes m} = \sum_{i \in X} p(i) |i\rangle

        where :math:`X = \{0, 1, \ldots, M - 1\}` and :math:`|i\rangle` is the basis state
        corresponding to :math:`i`. The :math:`\mathcal{R}` unitary imprints the
        result of a function :math:`f: X \rightarrow [0, 1]` onto an ancilla qubit:

        .. math::

            \mathcal{R}|i\rangle |0\rangle = |i\rangle \left(\sqrt{1 - f(i)} |0\rangle + \sqrt{f(i)}|1\rangle\right).

        Following `this `__ paper,
        the probability of measuring the state :math:`|1\rangle` in the final
        qubit is

        .. math::

            \mu = \sum_{i \in X} p(i) f(i).

        However, it is possible to measure :math:`\mu` more efficiently using quantum Monte Carlo
        estimation. This function transforms an input quantum circuit ``fn`` that performs the
        unitary :math:`\mathcal{F}` to a larger circuit for measuring :math:`\mu` using the quantum
        Monte Carlo algorithm.

        .. figure:: ../../_static/ops/qmc.svg
            :align: center
            :width: 60%
            :target: javascript:void(0);

        The algorithm proceeds as follows:

        #. The probability distribution :math:`p(i)` is encoded using a unitary :math:`\mathcal{A}`
           applied to the first :math:`m` qubits specified by ``wires``.
        #. The function :math:`f(i)` is encoded onto the ``target_wire`` using a unitary
           :math:`\mathcal{R}`.
        #. The unitary :math:`\mathcal{Q}` is defined with eigenvalues
           :math:`e^{\pm 2 \pi i \theta}` such that the phase :math:`\theta` encodes the expectation
           value through the equation :math:`\mu = (1 + \cos (\pi \theta)) / 2`. The circuit in
           steps 1 and 2 prepares an equal superposition over the two states corresponding to the
           eigenvalues :math:`e^{\pm 2 \pi i \theta}`.
        #. The circuit returned by this function is applied so that :math:`\pm\theta` can be
           estimated by finding the probabilities of the :math:`n` estimation wires. This in turn
           allows for the estimation of :math:`\mu`.

        Visit `Rebentrost et al. (2018)
        `__ for further details.
        In this algorithm, the number of applications :math:`N` of the :math:`\mathcal{Q}` unitary
        scales as :math:`2^{n}`. However, due to the use of quantum phase estimation, the error
        :math:`\epsilon` scales as :math:`\mathcal{O}(2^{-n})`. Hence,

        .. math::

            N = \mathcal{O}\left(\frac{1}{\epsilon}\right).

        This scaling can be compared to standard Monte Carlo estimation, where :math:`N` samples are
        generated from the probability distribution and the average over :math:`f` is taken. In that
        case,

        .. math::

            N =  \mathcal{O}\left(\frac{1}{\epsilon^{2}}\right).

        Hence, the quantum Monte Carlo algorithm has a quadratically improved time complexity with
        :math:`N`.

        **Example**

        Consider a standard normal distribution :math:`p(x)` and a function
        :math:`f(x) = \sin ^{2} (x)`. The expectation value of :math:`f(x)` is
        :math:`\int_{-\infty}^{\infty}f(x)p(x) \approx 0.432332`. This number can be approximated by
        discretizing the problem and using the quantum Monte Carlo algorithm.

        First, the problem is discretized:

        .. code-block:: python

            from scipy.stats import norm

            m = 5
            M = 2 ** m

            xmax = np.pi  # bound to region [-pi, pi]
            xs = np.linspace(-xmax, xmax, M)

            probs = np.array([norm().pdf(x) for x in xs])
            probs /= np.sum(probs)

            func = lambda i: np.sin(xs[i]) ** 2
            r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])

        The ``quantum_monte_carlo`` transform can then be used:

        .. code-block::

            from pennylane.templates.state_preparations.mottonen import (
                _uniform_rotation_dagger as r_unitary,
            )

            n = 6
            N = 2 ** n

            a_wires = range(m)
            wires = range(m + 1)
            target_wire = m
            estimation_wires = range(m + 1, n + m + 1)

            dev = qml.device(""default.qubit"", wires=(n + m + 1))

            def fn():
                qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=a_wires)
                r_unitary(qml.RY, r_rotations, control_wires=a_wires[::-1], target_wire=target_wire)

            @qml.qnode(dev)
            def qmc():
                qml.quantum_monte_carlo(fn, wires, target_wire, estimation_wires)()
                return qml.probs(estimation_wires)

            phase_estimated = np.argmax(qmc()[:int(N / 2)]) / N

        The estimated value can be retrieved using the formula :math:`\mu = (1-\cos(\pi \theta))/2`

        >>> (1 - np.cos(np.pi * phase_estimated)) / 2
        0.42663476277231915

        It is also possible to explore the resources required to perform the quantum Monte Carlo
        algorithm

        >>> qtape = qmc.qtape.expand(depth=1)
        >>> qtape.get_resources()
        {'RY': 14674,
         'CNOT': 15686,
         'PhaseShift': 1020,
         'RX': 510,
         'CZ': 126,
         'PauliX': 1260,
         'Toffoli': 2016,
         'SWAP': 3,
         'Hadamard': 6,
         'ControlledPhaseShift': 15}
    """"""
    wires = Wires(wires)
    target_wire = Wires(target_wire)
    estimation_wires = Wires(estimation_wires)

    if Wires.shared_wires([wires, estimation_wires]):
        raise ValueError(""No wires can be shared between the wires and estimation_wires registers"")

    @wraps(fn)
    def wrapper(*args, **kwargs):
        fn(*args, **kwargs)
        for i, control_wire in enumerate(estimation_wires):
            Hadamard(control_wire)

            # Find wires eligible to be used as helper wires
            work_wires = estimation_wires.toset() - {control_wire}
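            # The i-th estimation wire controls 2**(n - i - 1) applications of Q,
            # where n = len(estimation_wires), as in quantum phase estimation.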
            n_reps = 2 ** (len(estimation_wires) - (i + 1))

            q = apply_controlled_Q(
                fn,
                wires=wires,
                target_wire=target_wire,
                control_wire=control_wire,
                work_wires=work_wires,
            )

            for _ in range(n_reps):
                q(*args, **kwargs)

        QFT(wires=estimation_wires).inv()

    return wrapper
"
2824,"def additive_chi2_kernel(X, Y=None):
    """"""Compute the additive chi-squared kernel between observations in X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y.  X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide `.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        Input array/matrix X.

    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the additive chi-squared kernel between observations in X and Y.

    See Also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://hal.archives-ouvertes.fr/hal-00171412/document
    """"""
    if issparse(X) or issparse(Y):
        raise ValueError(""additive_chi2 does not support sparse matrices."")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError(""X contains negative values."")
    if Y is not X and (Y < 0).any():
        raise ValueError(""Y contains negative values."")

    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result

","def additive_chi2_kernel(X, Y=None):
    """"""Compute the additive chi-squared kernel between observations in X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y.  X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide `.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        Input array/matrix X.

    Y : ndarray of shape (n_samples_Y, n_features), default=None
        An optional second feature array. If `None`, uses `Y=X`.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the additive chi-squared kernel between observations in X and Y.

    See Also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://hal.archives-ouvertes.fr/hal-00171412/document
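
    Examples
    --------
    A small illustrative computation; the value follows directly from the
    formula above: ``-((1-2)^2/(1+2) + (2-1)^2/(2+1)) = -2/3``.

    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import additive_chi2_kernel
    >>> X = np.array([[1.0, 2.0]])
    >>> Y = np.array([[2.0, 1.0]])
    >>> additive_chi2_kernel(X, Y)
    array([[-0.66666667]])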
    """"""
    if issparse(X) or issparse(Y):
        raise ValueError(""additive_chi2 does not support sparse matrices."")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError(""X contains negative values."")
    if Y is not X and (Y < 0).any():
        raise ValueError(""Y contains negative values."")

    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result

"
5530,"def validate_token(authorization, **kwargs):

    auth = authorization.split()
    if auth[0].lower() != ""bearer"":
        return

    jwt_token = auth[1]
    if not (payload := KumaOIDCAuthenticationBackend().verify_token(jwt_token)):
        return None

    issuer = payload[""iss""]
    exp = payload[""exp""]

    # # If the issuer is not Firefox Accounts raise a 404 error
    if settings.FXA_TOKEN_ISSUER != issuer:
        return None

    # Check if the token is expired
    if exp < time.time():
        return None

    return payload
","def validate_token(authorization, **kwargs):

    auth = authorization.split()
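    # The Authorization header is expected to be of the form 'Bearer <token>'.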
    if auth[0].lower() != ""bearer"":
        return None

    jwt_token = auth[1]
    if not (payload := KumaOIDCAuthenticationBackend().verify_token(jwt_token)):
        return None

    issuer = payload[""iss""]
    exp = payload[""exp""]

    # If the issuer is not Firefox Accounts, reject the token
    if settings.FXA_TOKEN_ISSUER != issuer:
        return None

    # Check if the token is expired
    if exp < time.time():
        return None

    return payload
"
43809,"def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
    r""""""Calculates the Hamiltonian which imposes the constraint that each node has
    an outflow of at most one.

    The out flow constraint is, for all :math:`i`:

    .. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,

    where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
    whether to include the edge :math:`(i, j)`.

    The corresponding qubit Hamiltonian is:

    .. math::

        \frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
        - 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
        \left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)


    where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
    upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
    :math:`1/4` constant factor.

    This Hamiltonian is minimized by selecting edges such that each node has an outflow of at most one.

    Args:
        graph (nx.DiGraph): the graph specifying possible edges

    Returns:
        qml.Hamiltonian: the out flow constraint Hamiltonian

    Raises:
        ValueError: if the input graph is not directed
    """"""
    if not hasattr(graph, ""out_edges""):
        raise ValueError(""Input graph must be directed"")

    hamiltonian = qml.Hamiltonian([], [])

    for node in graph.nodes:
        hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)

    return hamiltonian

","def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
    r""""""Calculates the Hamiltonian which imposes the constraint that each node has
    an outflow of at most one.

    The out flow constraint is, for all :math:`i`:

    .. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,

    where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
    whether to include the edge :math:`(i, j)`.

    A set of edges satisfies the out-flow constraint whenever the following Hamiltonian is minimized:

    .. math::

        \frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
        - 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
        \left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)


    where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
    upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
    :math:`1/4` constant factor.

    This Hamiltonian is minimized by selecting edges such that each node has an outflow of at most one.

    Args:
        graph (nx.DiGraph): the graph specifying possible edges

    Returns:
        qml.Hamiltonian: the out flow constraint Hamiltonian

    Raises:
        ValueError: if the input graph is not directed
    """"""
    if not hasattr(graph, ""out_edges""):
        raise ValueError(""Input graph must be directed"")

    hamiltonian = qml.Hamiltonian([], [])
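    # Sum the per-node out-flow constraint terms over every node in the graph.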

    for node in graph.nodes:
        hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)

    return hamiltonian

"
38381,"def create_profile(
    data_source,
    bin_fields,
    fields,
    n_bins=64,
    extrema=None,
    logs=None,
    units=None,
    weight_field=(""gas"", ""mass""),
    accumulation=False,
    fractional=False,
    deposition=""ngp"",
    override_bins=None,
):
    r""""""
    Create a 1, 2, or 3D profile object.

    The dimensionality of the profile object is chosen by the number of
    fields given in the bin_fields argument.

    Parameters
    ----------
    data_source : YTSelectionContainer Object
        The data object to be profiled.
    bin_fields : list of strings
        List of the binning fields for profiling.
    fields : list of strings
        The fields to be profiled.
    n_bins : int or list of ints
        The number of bins in each dimension.  If None, 64 bins for
        each bin are used for each bin field.
        Default: 64.
    extrema : dict of min, max tuples
        Minimum and maximum values of the bin_fields for the profiles.
        The keys correspond to the field names. Defaults to the extrema
        of the bin_fields of the dataset. If a units dict is provided, extrema
        are understood to be in the units specified in the dictionary.
    logs : dict of boolean values
        Whether or not to log the bin_fields for the profiles.
        The keys correspond to the field names. Defaults to the take_log
        attribute of the field.
    units : dict of strings
        The units of the fields in the profiles, including the bin_fields.
    weight_field : str or tuple field identifier
        The weight field for computing weighted average for the profile
        values.  If None, the profile values are sums of the data in
        each bin. Defaults to (""gas"", ""mass"").
    accumulation : bool or list of bools
        If True, the profile values for a bin n are the cumulative sum of
        all the values from bin 0 to n.  If -True, the sum is reversed so
        that the value for bin n is the cumulative sum from bin N (total bins)
        to n.  If the profile is 2D or 3D, a list of values can be given to
        control the summation in each dimension independently.
        Default: False.
    fractional : bool
        If True the profile values are divided by the sum of all
        the profile data such that the profile represents a probability
        distribution function.
    deposition : strings
        Controls the type of deposition used for ParticlePhasePlots.
        Valid choices are 'ngp' and 'cic'. Default is 'ngp'. This parameter is
        ignored the if the input fields are not of particle type.
    override_bins : dict of bins to profile plot with
        If set, ignores n_bins and extrema settings and uses the
        supplied bins to profile the field. If a units dict is provided,
        bins are understood to be in the units specified in the dictionary.


    Examples
    --------

    Create a 1d profile.  Access bin field from profile.x and field
    data from profile[].

    >>> ds = load(""DD0046/DD0046"")
    >>> ad = ds.all_data()
    >>> profile = create_profile(
    ...     ad, [(""gas"", ""density"")], [(""gas"", ""temperature""), (""gas"", ""velocity_x"")]
    ... )
    >>> print(profile.x)
    >>> print(profile[""gas"", ""temperature""])

    """"""
    bin_fields = data_source._determine_fields(bin_fields)
    fields = list(iter_fields(fields))
    is_pfield = [
        data_source.ds._get_field_info(f).sampling_type == ""particle""
        for f in bin_fields + fields
    ]
    wf = None
    if weight_field is not None:
        wf = data_source.ds._get_field_info(weight_field)
        is_pfield.append(wf.sampling_type == ""particle"")
        wf = wf.name

    if len(bin_fields) > 1 and isinstance(accumulation, bool):
        accumulation = [accumulation for _ in range(len(bin_fields))]

    bin_fields = data_source._determine_fields(bin_fields)
    fields = data_source._determine_fields(fields)
    units = sanitize_field_tuple_keys(units, data_source)
    extrema = sanitize_field_tuple_keys(extrema, data_source)
    logs = sanitize_field_tuple_keys(logs, data_source)
    override_bins = sanitize_field_tuple_keys(override_bins, data_source)

    if any(is_pfield) and not all(is_pfield):
        if hasattr(data_source.ds, ""_sph_ptypes""):
            is_local = [
                data_source.ds.field_info[f].sampling_type == ""local""
                for f in bin_fields + fields
            ]
            is_local_or_pfield = [pf or lf for (pf, lf) in zip(is_pfield, is_local)]
            if not all(is_local_or_pfield):
                raise YTIllDefinedProfile(
                    bin_fields, data_source._determine_fields(fields), wf, is_pfield
                )
        else:
            raise YTIllDefinedProfile(
                bin_fields, data_source._determine_fields(fields), wf, is_pfield
            )
    if len(bin_fields) == 1:
        cls = Profile1D
    elif len(bin_fields) == 2 and all(is_pfield):
        if deposition == ""cic"":
            if logs is not None:
                if (bin_fields[0] in logs and logs[bin_fields[0]]) or (
                    bin_fields[1] in logs and logs[bin_fields[1]]
                ):
                    raise RuntimeError(
                        ""CIC deposition is only implemented for linear-scaled axes""
                    )
            else:
                logs = {bin_fields[0]: False, bin_fields[1]: False}
            if any(accumulation) or fractional:
                raise RuntimeError(
                    ""The accumulation and fractional keyword arguments must be ""
                    ""False for CIC deposition""
                )
        cls = ParticleProfile
    elif len(bin_fields) == 2:
        cls = Profile2D
    elif len(bin_fields) == 3:
        cls = Profile3D
    else:
        raise NotImplementedError
    if weight_field is not None and cls == ParticleProfile:
        (weight_field,) = data_source._determine_fields([weight_field])
        wf = data_source.ds._get_field_info(weight_field)
        if not wf.sampling_type == ""particle"":
            weight_field = None
    if not is_sequence(n_bins):
        n_bins = [n_bins] * len(bin_fields)
    if not is_sequence(accumulation):
        accumulation = [accumulation] * len(bin_fields)
    if logs is None:
        logs = {}
    logs_list = []
    for bin_field in bin_fields:
        if bin_field in logs:
            logs_list.append(logs[bin_field])
        else:
            logs_list.append(data_source.ds.field_info[bin_field].take_log)
    logs = logs_list

    # Are the extrema all Nones? Then treat them as though extrema was set as None
    if extrema is not None:
        extrema_vals = list(extrema.values())
        flat_list = [item for sublist in extrema_vals for item in sublist]
        extrema_all_nones = not any(flat_list)

    if extrema is None or extrema_all_nones:
        ex = [
            data_source.quantities[""Extrema""](f, non_zero=l)
            for f, l in zip(bin_fields, logs)
        ]
        # pad extrema by epsilon so cells at bin edges are not excluded
        for i, (mi, ma) in enumerate(ex):
            mi = mi - np.spacing(mi)
            ma = ma + np.spacing(ma)
            ex[i][0], ex[i][1] = mi, ma
    else:
        ex = []
        for bin_field in bin_fields:
            bf_units = data_source.ds.field_info[bin_field].output_units
            try:
                field_ex = list(extrema[bin_field[-1]])
            except KeyError as e:
                try:
                    field_ex = list(extrema[bin_field])
                except KeyError:
                    raise RuntimeError(
                        ""Could not find field {} or {} in extrema"".format(
                            bin_field[-1], bin_field
                        )
                    ) from e

            if isinstance(field_ex[0], tuple):
                field_ex = [data_source.ds.quan(*f) for f in field_ex]
            if any([exi is None for exi in field_ex]):
                try:
                    ds_extrema = data_source.quantities.extrema(bin_field)
                except AttributeError:
                    # ytdata profile datasets don't have data_source.quantities
                    bf_vals = data_source[bin_field]
                    ds_extrema = data_source.ds.arr([bf_vals.min(), bf_vals.max()])
                for i, exi in enumerate(field_ex):
                    if exi is None:
                        field_ex[i] = ds_extrema[i]
                        # pad extrema by epsilon so cells at bin edges are
                        # not excluded
                        field_ex[i] -= (-1) ** i * np.spacing(field_ex[i])
            if units is not None and bin_field in units:
                for i, exi in enumerate(field_ex):
                    if hasattr(exi, ""units""):
                        field_ex[i] = exi.to(units[bin_field])
                    else:
                        field_ex[i] = data_source.ds.quan(exi, units[bin_field])
                fe = data_source.ds.arr(field_ex)
            else:
                if hasattr(field_ex, ""units""):
                    fe = field_ex.to(bf_units)
                else:
                    fe = data_source.ds.arr(field_ex, bf_units)
            fe.convert_to_units(bf_units)
            field_ex = [fe[0].v, fe[1].v]
            if is_sequence(field_ex[0]):
                field_ex[0] = data_source.ds.quan(field_ex[0][0], field_ex[0][1])
                field_ex[0] = field_ex[0].in_units(bf_units)
            if is_sequence(field_ex[1]):
                field_ex[1] = data_source.ds.quan(field_ex[1][0], field_ex[1][1])
                field_ex[1] = field_ex[1].in_units(bf_units)
            ex.append(field_ex)

    if override_bins is not None:
        o_bins = []
        for bin_field in bin_fields:
            bf_units = data_source.ds.field_info[bin_field].output_units
            try:
                field_obin = override_bins[bin_field[-1]]
            except KeyError:
                field_obin = override_bins[bin_field]

            if field_obin is None:
                o_bins.append(None)
                continue

            if isinstance(field_obin, tuple):
                field_obin = data_source.ds.arr(*field_obin)

            if units is not None and bin_field in units:
                fe = data_source.ds.arr(field_obin, units[bin_field])
            else:
                if hasattr(field_obin, ""units""):
                    fe = field_obin.to(bf_units)
                else:
                    fe = data_source.ds.arr(field_obin, bf_units)
            fe.convert_to_units(bf_units)
            field_obin = fe.d
            o_bins.append(field_obin)

    args = [data_source]
    for f, n, (mi, ma), l in zip(bin_fields, n_bins, ex, logs):
        if mi <= 0 and l:
            raise YTIllDefinedBounds(mi, ma)
        args += [f, n, mi, ma, l]
    kwargs = dict(weight_field=weight_field)
    if cls is ParticleProfile:
        kwargs[""deposition""] = deposition
    if override_bins is not None:
        for o_bin, ax in zip(o_bins, [""x"", ""y"", ""z""]):
            kwargs[f""override_bins_{ax}""] = o_bin
    obj = cls(*args, **kwargs)
    obj.accumulation = accumulation
    obj.fractional = fractional
    if fields is not None:
        obj.add_fields([field for field in fields])
    for field in fields:
        if fractional:
            obj.field_data[field] /= obj.field_data[field].sum()
        for axis, acc in enumerate(accumulation):
            if not acc:
                continue
            temp = obj.field_data[field]
            temp = np.rollaxis(temp, axis)
            if weight_field is not None:
                temp_weight = obj.weight
                temp_weight = np.rollaxis(temp_weight, axis)
            if acc < 0:
                temp = temp[::-1]
                if weight_field is not None:
                    temp_weight = temp_weight[::-1]
            if weight_field is None:
                temp = temp.cumsum(axis=0)
            else:
                temp = (temp * temp_weight).cumsum(axis=0) / temp_weight.cumsum(axis=0)
            if acc < 0:
                temp = temp[::-1]
                if weight_field is not None:
                    temp_weight = temp_weight[::-1]
            temp = np.rollaxis(temp, axis)
            obj.field_data[field] = temp
            if weight_field is not None:
                temp_weight = np.rollaxis(temp_weight, axis)
                obj.weight = temp_weight
    if units is not None:
        for field, unit in units.items():
            field = data_source._determine_fields(field)[0]
            if field == obj.x_field:
                obj.set_x_unit(unit)
            elif field == getattr(obj, ""y_field"", None):
                obj.set_y_unit(unit)
            elif field == getattr(obj, ""z_field"", None):
                obj.set_z_unit(unit)
            else:
                obj.set_field_unit(field, unit)
    return obj
","def create_profile(
    data_source,
    bin_fields,
    fields,
    n_bins=64,
    extrema=None,
    logs=None,
    units=None,
    weight_field=(""gas"", ""mass""),
    accumulation=False,
    fractional=False,
    deposition=""ngp"",
    override_bins=None,
):
    r""""""
    Create a 1, 2, or 3D profile object.

    The dimensionality of the profile object is chosen by the number of
    fields given in the bin_fields argument.

    Parameters
    ----------
    data_source : YTSelectionContainer Object
        The data object to be profiled.
    bin_fields : list of strings
        List of the binning fields for profiling.
    fields : list of strings
        The fields to be profiled.
    n_bins : int or list of ints
        The number of bins in each dimension.  If None, 64 bins are
        used for each bin field.
        Default: 64.
    extrema : dict of min, max tuples
        Minimum and maximum values of the bin_fields for the profiles.
        The keys correspond to the field names. Defaults to the extrema
        of the bin_fields of the dataset. If a units dict is provided, extrema
        are understood to be in the units specified in the dictionary.
    logs : dict of boolean values
        Whether or not to log the bin_fields for the profiles.
        The keys correspond to the field names. Defaults to the take_log
        attribute of the field.
    units : dict of strings
        The units of the fields in the profiles, including the bin_fields.
    weight_field : str or tuple field identifier
        The weight field for computing weighted average for the profile
        values.  If None, the profile values are sums of the data in
        each bin. Defaults to (""gas"", ""mass"").
    accumulation : bool or list of bools
        If True, the profile values for a bin n are the cumulative sum of
        all the values from bin 0 to n.  If -True, the sum is reversed so
        that the value for bin n is the cumulative sum from bin N (total bins)
        to n.  If the profile is 2D or 3D, a list of values can be given to
        control the summation in each dimension independently.
        Default: False.
    fractional : bool
        If True the profile values are divided by the sum of all
        the profile data such that the profile represents a probability
        distribution function.
    deposition : strings
        Controls the type of deposition used for ParticlePhasePlots.
        Valid choices are 'ngp' and 'cic'. Default is 'ngp'. This parameter is
        ignored if the input fields are not of particle type.
    override_bins : dict of bins to profile plot with
        If set, ignores n_bins and extrema settings and uses the
        supplied bins to profile the field. If a units dict is provided,
        bins are understood to be in the units specified in the dictionary.

    Examples
    --------

    Create a 1d profile.  Access bin field from profile.x and field
    data from profile[<field>].

    >>> ds = load(""DD0046/DD0046"")
    >>> ad = ds.all_data()
    >>> profile = create_profile(
    ...     ad, [(""gas"", ""density"")], [(""gas"", ""temperature""), (""gas"", ""velocity_x"")]
    ... )
    >>> print(profile.x)
    >>> print(profile[""gas"", ""temperature""])
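
    Create a 2D profile of the total gas mass in density-temperature bins
    (the field names are illustrative and assume a typical gas dataset):

    >>> profile2d = create_profile(
    ...     ad,
    ...     [(""gas"", ""density""), (""gas"", ""temperature"")],
    ...     [(""gas"", ""mass"")],
    ...     weight_field=None,
    ... )
    >>> print(profile2d[""gas"", ""mass""])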

    """"""
    bin_fields = data_source._determine_fields(bin_fields)
    fields = list(iter_fields(fields))
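    # Flag which of the binning/profiled fields are particle fields; mixing
    # particle and mesh fields is only permitted for SPH datasets with
    # locally-defined mesh fields (validated further below).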
    is_pfield = [
        data_source.ds._get_field_info(f).sampling_type == ""particle""
        for f in bin_fields + fields
    ]
    wf = None
    if weight_field is not None:
        wf = data_source.ds._get_field_info(weight_field)
        is_pfield.append(wf.sampling_type == ""particle"")
        wf = wf.name

    if len(bin_fields) > 1 and isinstance(accumulation, bool):
        accumulation = [accumulation for _ in range(len(bin_fields))]

    bin_fields = data_source._determine_fields(bin_fields)
    fields = data_source._determine_fields(fields)
    units = sanitize_field_tuple_keys(units, data_source)
    extrema = sanitize_field_tuple_keys(extrema, data_source)
    logs = sanitize_field_tuple_keys(logs, data_source)
    override_bins = sanitize_field_tuple_keys(override_bins, data_source)

    if any(is_pfield) and not all(is_pfield):
        if hasattr(data_source.ds, ""_sph_ptypes""):
            is_local = [
                data_source.ds.field_info[f].sampling_type == ""local""
                for f in bin_fields + fields
            ]
            is_local_or_pfield = [pf or lf for (pf, lf) in zip(is_pfield, is_local)]
            if not all(is_local_or_pfield):
                raise YTIllDefinedProfile(
                    bin_fields, data_source._determine_fields(fields), wf, is_pfield
                )
        else:
            raise YTIllDefinedProfile(
                bin_fields, data_source._determine_fields(fields), wf, is_pfield
            )
    if len(bin_fields) == 1:
        cls = Profile1D
    elif len(bin_fields) == 2 and all(is_pfield):
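        # Particle phase profiles: CIC deposition only supports linear axes and
        # cannot be combined with accumulation or fractional normalization.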
        if deposition == ""cic"":
            if logs is not None:
                if (bin_fields[0] in logs and logs[bin_fields[0]]) or (
                    bin_fields[1] in logs and logs[bin_fields[1]]
                ):
                    raise RuntimeError(
                        ""CIC deposition is only implemented for linear-scaled axes""
                    )
            else:
                logs = {bin_fields[0]: False, bin_fields[1]: False}
            if any(accumulation) or fractional:
                raise RuntimeError(
                    ""The accumulation and fractional keyword arguments must be ""
                    ""False for CIC deposition""
                )
        cls = ParticleProfile
    elif len(bin_fields) == 2:
        cls = Profile2D
    elif len(bin_fields) == 3:
        cls = Profile3D
    else:
        raise NotImplementedError
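    # ParticleProfile can only be weighted by a particle field; a mesh-based
    # weight field is silently dropped rather than raising an error.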
    if weight_field is not None and cls == ParticleProfile:
        (weight_field,) = data_source._determine_fields([weight_field])
        wf = data_source.ds._get_field_info(weight_field)
        if not wf.sampling_type == ""particle"":
            weight_field = None
    if not is_sequence(n_bins):
        n_bins = [n_bins] * len(bin_fields)
    if not is_sequence(accumulation):
        accumulation = [accumulation] * len(bin_fields)
    if logs is None:
        logs = {}
    logs_list = []
    for bin_field in bin_fields:
        if bin_field in logs:
            logs_list.append(logs[bin_field])
        else:
            logs_list.append(data_source.ds.field_info[bin_field].take_log)
    logs = logs_list

    # Are the extrema all Nones? Then treat them as though extrema was set as None
    if extrema is None or not any(collapse(extrema.values())):
        ex = [
            data_source.quantities[""Extrema""](f, non_zero=l)
            for f, l in zip(bin_fields, logs)
        ]
        # pad extrema by epsilon so cells at bin edges are not excluded
        for i, (mi, ma) in enumerate(ex):
            mi = mi - np.spacing(mi)
            ma = ma + np.spacing(ma)
            ex[i][0], ex[i][1] = mi, ma
    else:
        ex = []
        for bin_field in bin_fields:
            bf_units = data_source.ds.field_info[bin_field].output_units
            try:
                field_ex = list(extrema[bin_field[-1]])
            except KeyError as e:
                try:
                    field_ex = list(extrema[bin_field])
                except KeyError:
                    raise RuntimeError(
                        ""Could not find field {} or {} in extrema"".format(
                            bin_field[-1], bin_field
                        )
                    ) from e

            if isinstance(field_ex[0], tuple):
                field_ex = [data_source.ds.quan(*f) for f in field_ex]
            if any([exi is None for exi in field_ex]):
                try:
                    ds_extrema = data_source.quantities.extrema(bin_field)
                except AttributeError:
                    # ytdata profile datasets don't have data_source.quantities
                    bf_vals = data_source[bin_field]
                    ds_extrema = data_source.ds.arr([bf_vals.min(), bf_vals.max()])
                for i, exi in enumerate(field_ex):
                    if exi is None:
                        field_ex[i] = ds_extrema[i]
                        # pad extrema by epsilon so cells at bin edges are
                        # not excluded
                        field_ex[i] -= (-1) ** i * np.spacing(field_ex[i])
            if units is not None and bin_field in units:
                for i, exi in enumerate(field_ex):
                    if hasattr(exi, ""units""):
                        field_ex[i] = exi.to(units[bin_field])
                    else:
                        field_ex[i] = data_source.ds.quan(exi, units[bin_field])
                fe = data_source.ds.arr(field_ex)
            else:
                if hasattr(field_ex, ""units""):
                    fe = field_ex.to(bf_units)
                else:
                    fe = data_source.ds.arr(field_ex, bf_units)
            fe.convert_to_units(bf_units)
            field_ex = [fe[0].v, fe[1].v]
            if is_sequence(field_ex[0]):
                field_ex[0] = data_source.ds.quan(field_ex[0][0], field_ex[0][1])
                field_ex[0] = field_ex[0].in_units(bf_units)
            if is_sequence(field_ex[1]):
                field_ex[1] = data_source.ds.quan(field_ex[1][0], field_ex[1][1])
                field_ex[1] = field_ex[1].in_units(bf_units)
            ex.append(field_ex)

    if override_bins is not None:
        o_bins = []
        for bin_field in bin_fields:
            bf_units = data_source.ds.field_info[bin_field].output_units
            try:
                field_obin = override_bins[bin_field[-1]]
            except KeyError:
                field_obin = override_bins[bin_field]

            if field_obin is None:
                o_bins.append(None)
                continue

            if isinstance(field_obin, tuple):
                field_obin = data_source.ds.arr(*field_obin)

            if units is not None and bin_field in units:
                fe = data_source.ds.arr(field_obin, units[bin_field])
            else:
                if hasattr(field_obin, ""units""):
                    fe = field_obin.to(bf_units)
                else:
                    fe = data_source.ds.arr(field_obin, bf_units)
            fe.convert_to_units(bf_units)
            field_obin = fe.d
            o_bins.append(field_obin)

    args = [data_source]
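    # Build the per-axis positional arguments (field, n_bins, min, max, take_log)
    # expected by the profile class constructor.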
    for f, n, (mi, ma), l in zip(bin_fields, n_bins, ex, logs):
        if mi <= 0 and l:
            raise YTIllDefinedBounds(mi, ma)
        args += [f, n, mi, ma, l]
    kwargs = dict(weight_field=weight_field)
    if cls is ParticleProfile:
        kwargs[""deposition""] = deposition
    if override_bins is not None:
        for o_bin, ax in zip(o_bins, [""x"", ""y"", ""z""]):
            kwargs[f""override_bins_{ax}""] = o_bin
    obj = cls(*args, **kwargs)
    obj.accumulation = accumulation
    obj.fractional = fractional
    if fields is not None:
        obj.add_fields([field for field in fields])
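    # Post-process the profiled fields: optionally normalize each one into a
    # probability distribution and/or convert it to (weighted) cumulative sums
    # along every axis flagged in `accumulation`.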
    for field in fields:
        if fractional:
            obj.field_data[field] /= obj.field_data[field].sum()
        for axis, acc in enumerate(accumulation):
            if not acc:
                continue
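            # Roll the accumulation axis to the front so cumsum always acts on
            # axis 0; reverse first when acc < 0, then undo both afterwards.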
            temp = obj.field_data[field]
            temp = np.rollaxis(temp, axis)
            if weight_field is not None:
                temp_weight = obj.weight
                temp_weight = np.rollaxis(temp_weight, axis)
            if acc < 0:
                temp = temp[::-1]
                if weight_field is not None:
                    temp_weight = temp_weight[::-1]
            if weight_field is None:
                temp = temp.cumsum(axis=0)
            else:
                temp = (temp * temp_weight).cumsum(axis=0) / temp_weight.cumsum(axis=0)
            if acc < 0:
                temp = temp[::-1]
                if weight_field is not None:
                    temp_weight = temp_weight[::-1]
            temp = np.rollaxis(temp, axis)
            obj.field_data[field] = temp
            if weight_field is not None:
                temp_weight = np.rollaxis(temp_weight, axis)
                obj.weight = temp_weight
    if units is not None:
        for field, unit in units.items():
            field = data_source._determine_fields(field)[0]
            if field == obj.x_field:
                obj.set_x_unit(unit)
            elif field == getattr(obj, ""y_field"", None):
                obj.set_y_unit(unit)
            elif field == getattr(obj, ""z_field"", None):
                obj.set_z_unit(unit)
            else:
                obj.set_field_unit(field, unit)
    return obj
"
22041,"def download_google_bigquery_table(project, dataset, table, columns=None, condition=None, export=None, client_project=None, credentials=None):
    '''Download (stream) an entire Google BigQuery table locally.

    :param str project: The Google BigQuery project that owns the table.
    :param str dataset: The dataset the table is part of.
    :param str table: The name of the table
    :param list columns: A list of columns (field names) to download. If None, all columns will be downloaded.
    :param str condition: SQL text filtering statement, similar to a WHERE clause in a query. Aggregates are not supported.
    :param str export: Pass an filename or path to download the table as an Apache Arrow file, and leverage memory mapping. If `None` the DataFrame is in memory.
    :param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, it will be set with the same value as `project`.
    :param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details.
    :rtype: DataFrame

    Example:

    >>> import os
    >>> os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json'
    >>> from vaex.contrib.io.gbq import download_google_bigquery_table

    >>> client_project = 'my_project_id'
    >>> project = 'bigquery-public-data'
    >>> dataset = 'ml_datasets'
    >>> table = 'iris'
    >>> columns = ['species', 'sepal_width', 'petal_width']
    >>> conditions = 'species = ""virginica""'
    >>> df = download_google_bigquery_table(project=project,
                                            dataset=dataset,
                                            table=table,
                                            columns=columns,
                                            condition=conditions,
                                            client_project=client_project)
    >>> df.head(3)
    #    sepal_width    petal_width  species
    0            2.5            1.7  virginica
    1            2.5            2    virginica
    2            2.2            1.5  virginica
    >>>

    '''
    # Instantiate the table path and the reading session
    bq_table = f'projects/{project}/datasets/{dataset}/tables/{table}'
    req_sess = google.cloud.bigquery_storage.types.ReadSession(table=bq_table, data_format=google.cloud.bigquery_storage.types.DataFormat.ARROW)

    # Read options
    req_sess.read_options.selected_fields = columns
    req_sess.read_options.row_restriction = condition

    # Instantiate the reading client
    client = google.cloud.bigquery_storage.BigQueryReadClient(credentials=credentials)

    parent = f'projects/{client_project or project}'
    session = client.create_read_session(parent=parent, read_session=req_sess, max_stream_count=1)
    reader = client.read_rows(session.streams[0].name)

    if export is None:
        arrow_table = reader.to_arrow(session)
        return vaex.from_arrow_table(arrow_table)

    else:
        # We need to get the schema first - Get one RecordsBatch manually to get the schema
        # Get the pages iterator
        pages = reader.rows(session).pages
        # Get the first batch
        first_batch = pages.__next__().to_arrow()
        # Get the schema
        schema = first_batch.schema

        # This does the writing - streams the batches to disk!
        with vaex.file.open(path=export, mode='wb') as sink:
            with pa.RecordBatchStreamWriter(sink, schema) as writer:
                writer.write_batch(first_batch)
                for page in pages:
                    batch = page.to_arrow()
                    writer.write_batch(batch)

        return vaex.open(export)

","def download_google_bigquery_table(project, dataset, table, columns=None, condition=None, export=None, client_project=None, credentials=None):
    '''Download (stream) an entire Google BigQuery table locally.

    :param str project: The Google BigQuery project that owns the table.
    :param str dataset: The dataset the table is part of.
    :param str table: The name of the table
    :param list columns: A list of columns (field names) to download. If None, all columns will be downloaded.
    :param str condition: SQL text filtering statement, similar to a WHERE clause in a query. Aggregates are not supported.
    :param str export: Pass an filename or path to download the table as an Apache Arrow file, and leverage memory mapping. If `None` the DataFrame is in memory.
    :param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, it will be set with the same value as `project`.
    :param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details.
    :rtype: DataFrame

    Example:

    >>> import os
    >>> os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json'
    >>> from vaex.contrib.io.gbq import download_google_bigquery_table

    >>> client_project = 'my_project_id'
    >>> project = 'bigquery-public-data'
    >>> dataset = 'ml_datasets'
    >>> table = 'iris'
    >>> columns = ['species', 'sepal_width', 'petal_width']
    >>> conditions = 'species = ""virginica""'
    >>> df = download_google_bigquery_table(project=project,
                                            dataset=dataset,
                                            table=table,
                                            columns=columns,
                                            condition=conditions,
                                            client_project=client_project)
    >>> df.head(3)
    #    sepal_width    petal_width  species
    0            2.5            1.7  virginica
    1            2.5            2    virginica
    2            2.2            1.5  virginica
    >>>
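
    To stream the table straight to an Arrow file on disk instead of keeping it
    in memory, pass `export` (the file path below is only illustrative):

    >>> df = download_google_bigquery_table(project=project,
                                            dataset=dataset,
                                            table=table,
                                            export='./bigquery_iris.arrow')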

    '''
    # Instantiate the table path and the reading session
    # NOTE: table_path_template is assumed to be defined at module level.
    bq_table = table_path_template.format(project=project, dataset=dataset, table=table)
    req_sess = google.cloud.bigquery_storage.types.ReadSession(table=bq_table, data_format=google.cloud.bigquery_storage.types.DataFormat.ARROW)

    # Read options
    req_sess.read_options.selected_fields = columns
    req_sess.read_options.row_restriction = condition

    # Instantiate the reading client
    client = google.cloud.bigquery_storage.BigQueryReadClient(credentials=credentials)

    parent = f'projects/{client_project or project}'
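    # Request a single read stream so all rows are consumed sequentially by the
    # reader below.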
    session = client.create_read_session(parent=parent, read_session=req_sess, max_stream_count=1)
    reader = client.read_rows(session.streams[0].name)

    if export is None:
        arrow_table = reader.to_arrow(session)
        return vaex.from_arrow_table(arrow_table)

    else:
        # We need to get the schema first - Get one RecordsBatch manually to get the schema
        # Get the pages iterator
        pages = reader.rows(session).pages
        # Get the first batch
        first_batch = pages.__next__().to_arrow()
        # Get the schema
        schema = first_batch.schema

        # This does the writing - streams the batches to disk!
        with vaex.file.open(path=export, mode='wb') as sink:
            with pa.RecordBatchStreamWriter(sink, schema) as writer:
                writer.write_batch(first_batch)
                for page in pages:
                    batch = page.to_arrow()
                    writer.write_batch(batch)

        return vaex.open(export)

"
26028,"def load_arguments(self, _):
    # Model imports
    DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
    SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
    UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
    HyperVGenerationTypes = self.get_models('HyperVGenerationTypes')
    DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
    OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
    RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
    GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
    ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions')

    # REUSABLE ARGUMENT DEFINITIONS
    name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
    multi_ids_type = CLIArgumentType(nargs='+')
    existing_vm_name = CLIArgumentType(overrides=name_arg_type,
                                       configured_default='vm',
                                       help=""The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=`"",
                                       completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
    existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
    existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
    vmss_name_type = CLIArgumentType(name_arg_type,
                                     configured_default='vmss',
                                     completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
                                     help=""Scale set name. You can configure the default using `az configure --defaults vmss=`"",
                                     id_part='name')

    extension_instance_name_type = CLIArgumentType(help=""Name of extension instance, which can be customized. Default: name of the extension."")
    image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
    disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
    ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01')

    license_type = CLIArgumentType(
        help=""Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for ""
             ""Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, ""
             ""use 'Windows_Client'. For more information see the Azure Windows VM online docs."",
        arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE',
                                'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC',
                                'None', 'RHEL_ELS_6']))

    # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
    DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes')

    if DiskStorageAccountTypes:
        disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
    else:
        # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
        # However, 2017-03-09-profile targets version 2016-03-30 of compute package.
        disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))

    if SnapshotStorageAccountTypes:
        snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
    else:
        # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
        # However, 2017-03-09-profile targets version 2016-03-30 of compute package.
        snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))

    # special case for `network nic scale-set list` command alias
    with self.argument_context('network nic scale-set list') as c:
        c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')

    HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks')
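    # Fall back to a hard-coded V1/V2 list when the target API profile exposes
    # neither HyperVGenerationTypes nor HyperVGeneration.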
    if HyperVGenerationTypes:
        hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default=""V1""))
    else:
        hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type([""V1"", ""V2""], default=""V1""))

    ultra_ssd_enabled_type = CLIArgumentType(
        arg_type=get_three_state_flag(), min_api='2018-06-01',
        help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')

    scale_in_policy_type = CLIArgumentType(
        nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
        help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
    )

    edge_zone_type = CLIArgumentType(
        help='The name of edge zone.',
        min_api='2020-12-01',
        is_preview=True
    )

    t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
    shared_to_type = CLIArgumentType(
        arg_type=get_enum_type(t_shared_to),
        help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
             'If not specified, list by subscription id.'
    )

    marker_type = CLIArgumentType(
        help='A string value that identifies the portion of the list of containers to be '
             'returned with the next listing operation. The operation returns the NextMarker value within '
             'the response body if the listing operation did not return all containers remaining to be listed '
             'with the current page. If specified, this generator will begin returning results from the point '
             'where the previous generator stopped.')

    enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.')
    enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.')
    security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')

    # region MixedScopes
    for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
        with self.argument_context(scope) as c:
            c.argument('tags', tags_type)

    for scope in ['disk', 'snapshot']:
        with self.argument_context(scope) as c:
            c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
            c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
            c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
            c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
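            # Upload/grant-access arguments only exist on API versions >= 2018-09-30;
            # hide them entirely on older profiles.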
            if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
                c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
                c.argument('for_upload', arg_type=get_three_state_flag(),
                           help='Create the {0} for uploading blobs later on through storage commands. Run ""az {0} grant-access --access-level Write"" to retrieve the {0}\'s SAS token.'.format(scope))
                c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
            else:
                c.ignore('access_level', 'for_upload', 'hyper_v_generation')
            c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')),
                       help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
            c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
            c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
            operation_group = 'disks' if scope == 'disk' else 'snapshots'
            c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
            c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
            c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.')
            c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.')
            c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.')

    for scope in ['disk create', 'snapshot create']:
        with self.argument_context(scope) as c:
            c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
    # endregion

    # region Disks
    with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
        c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone'])  # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
        c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
        c.argument('name', arg_type=name_arg_type)
        c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
        c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
        c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
        c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help=""The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10"")
        c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
                   help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
        c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
        c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
        c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
        c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
        c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
        c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk')
        c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
        c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.')
        c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.')
        c.argument('edge_zone', edge_zone_type)
        c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
        c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
        c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.')
        c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.')
    # endregion

    # region Snapshots
    with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
        c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
        c.argument('name', arg_type=name_arg_type)
        c.argument('sku', arg_type=snapshot_sku)
        c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
                   help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
        c.argument('edge_zone', edge_zone_type)
        c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01',
                   help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.')
        c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.')
    # endregion

    # region Images
    with self.argument_context('image') as c:
        c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
        c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
        c.argument('tags', tags_type)

    with self.argument_context('image create') as c:
        # here we collapse all different image sources under 2 common arguments --os-disk-source --data-disk-sources
        c.argument('name', arg_type=name_arg_type, help='new image name')
        c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
        c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
        c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
                   'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
        c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
        c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's OS disk."")
        c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
                   help=""Storage caching type for the image's data disk."")
        c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api=""2019-03-01"", help='The hypervisor generation of the Virtual Machine created from the image.')
        c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
        c.argument('edge_zone', edge_zone_type, )
    # endregion

    # region Image Templates
    with self.argument_context('image builder') as c:
        ib_output_name_help = ""Name of the image builder run output.""

        c.argument('location', get_location_type(self.cli_ctx))
        c.argument('scripts', nargs='+', help=""Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL.""
                                              "" Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'"")
        c.argument('source', options_list=[""--image-source"", ""-i""], help=""The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID."")
        c.argument('image_template_name', image_template_name_type, help=""The name of the image template."")
        c.argument('checksum', help=""The SHA256 checksum of the Red Hat ISO image"")
        c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g ""image_1=westus2 image_2=westus"". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
        c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g ""my_gallery_1/image_def_1=eastus,westus  my_gallery_2/image_def_2=uksouth,canadaeast,francesouth."" '
                                                                'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a ""/"". Each value is a comma-delimited list of replica locations.')
        c.argument('output_name', help=ib_output_name_help)
        c.ignore('destinations_lists', 'scripts_list', 'source_dict')

    with self.argument_context('image builder create') as c:
        ib_source_type = CLIArgumentType(arg_group=""Image Source"")
        ib_customizer_type = CLIArgumentType(arg_group=""Customizer"")
        ib_cutput_type = CLIArgumentType(arg_group=""Output"")

        c.argument('build_timeout', type=int, help=""The Maximum duration to wait while building the image template, in minutes. Default is 60."")
        c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json')
        c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')

        # VM profile
        c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
        c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
        c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
        c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
        c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).')
        c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.')

        # Image Source Arguments
        c.argument('source', arg_type=ib_source_type)
        c.argument('checksum', arg_type=ib_source_type)
        c.argument('', arg_type=ib_source_type)

        # Image Customizer Arguments
        c.argument('scripts', arg_type=ib_customizer_type)
        c.argument('', arg_type=ib_customizer_type)
        c.argument('', arg_type=ib_customizer_type)

        # Image Output Arguments
        c.argument('managed_image_destinations', arg_type=ib_cutput_type)
        c.argument('shared_image_destinations', arg_type=ib_cutput_type)
        c.argument('output_name', arg_type=ib_cutput_type)

    with self.argument_context('image builder output') as c:
        ib_sig_regions_help = ""Space-separated list of regions to replicate the image version into.""
        ib_img_location_help = ""Location where the customized image will be created.""

        c.argument('gallery_image_definition', arg_group=""Shared Image Gallery"", help=""Name or ID of the existing SIG image definition to create the customized image version with."")
        c.argument('gallery_name', arg_group=""Shared Image Gallery"", help=""Shared image gallery name, if image definition name and not ID was provided."")
        c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help)
        c.argument('managed_image', arg_group=""Managed Image"", help=""Name or ID of the customized managed image to be created."")
        c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help)

    with self.argument_context('image builder output add') as c:
        ib_artifact_tags_help = ""Tags that will be applied to the output artifact once it has been created by the distributor. "" + tags_type.settings['help']
        ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=[""--artifact-tags""])
        ib_default_loc_help = "" Defaults to resource group's location.""

        c.argument('output_name', help=ib_output_name_help + "" Defaults to the name of the managed image or sig image definition."")
        c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
        c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help + ib_default_loc_help)
        c.argument('is_vhd', arg_group=""VHD"", help=""The output is a VHD distributor."", action='store_true')
        c.argument('tags', arg_type=ib_artifact_tags_type)
        c.ignore('location')

    with self.argument_context('image builder customizer') as c:
        ib_win_restart_type = CLIArgumentType(arg_group=""Windows Restart"")
        ib_win_update_type = CLIArgumentType(arg_group=""Windows Update"")
        ib_script_type = CLIArgumentType(arg_group=""Shell and Powershell"")
        ib_powershell_type = CLIArgumentType(arg_group=""Powershell"")
        ib_file_customizer_type = CLIArgumentType(arg_group=""File"")

        c.argument('customizer_name', help=""Name of the customizer."")
        c.argument('customizer_type', options_list=['--type', '-t'], help=""Type of customizer to be added to the image template."", arg_type=get_enum_type(ScriptType))

        # Script Args
        c.argument('script_url', arg_type=ib_script_type, help=""URL of script to customize the image with. The URL must be publicly accessible."")
        c.argument('inline_script', arg_type=ib_script_type, nargs='+', help=""Space-separated list of inline script lines to customize the image with."")

        # Powershell Specific Args
        c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help=""Space-separated list of valid exit codes, as integers"")

        # Windows Restart Specific Args
        c.argument('restart_command', arg_type=ib_win_restart_type, help=""Command to execute the restart operation."")
        c.argument('restart_check_command', arg_type=ib_win_restart_type, help=""Command to verify that restart succeeded."")
        c.argument('restart_timeout', arg_type=ib_win_restart_type, help=""Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)"", default=""5m"")

        # Windows Update Specific Args
        c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
        c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
        c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')

        # File Args
        c.argument('file_source', arg_type=ib_file_customizer_type, help=""The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc."")
        c.argument('dest_path', arg_type=ib_file_customizer_type, help=""The absolute destination path where the file specified in --file-source will be downloaded to in the image"")

    # endregion

    # region AvailabilitySets
    with self.argument_context('vm availability-set') as c:
        c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')

    with self.argument_context('vm availability-set create') as c:
        c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
        c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
        c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
        c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
        c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')

    with self.argument_context('vm availability-set update') as c:
        if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
            c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
            c.argument('availability_set_name', options_list=['--availability-set-name'])
    # endregion

    # region VirtualMachines
    with self.argument_context('vm') as c:
        c.argument('vm_name', existing_vm_name)
        c.argument('size', completer=get_vm_size_completion_list)
        c.argument('name', arg_type=name_arg_type)
        c.argument('zone', zone_type, min_api='2017-03-30')
        c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
        c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none.', arg_group='Network')
        c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
        c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
        c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
    with self.argument_context('vm capture') as c:
        c.argument('overwrite', action='store_true')

    with self.argument_context('vm update') as c:
        c.argument('os_disk', min_api='2017-12-01', help=""Managed OS disk ID or name to swap to"")
        c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
                   help=""enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2"")
        c.argument('disk_caching', nargs='*', help=""Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks"")
        c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
        c.argument('enable_secure_boot', enable_secure_boot_type)
        c.argument('enable_vtpm', enable_vtpm_type)
        c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
        c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
                   help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
        c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.')

    with self.argument_context('vm create') as c:
        c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
        c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
        c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
        c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
        c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
        c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none (\'""""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
        c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
        c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
        c.argument('boot_diagnostics_storage',
                   help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
        c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
                   help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"")
        if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
            VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
            c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
                       arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
                       help=""The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine"")
        c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
                   help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
        c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
                   help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
        c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
                   help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')
        c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
        c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
        c.argument('platform_fault_domain', min_api='2020-06-01',
                   help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
        c.argument('count', type=int, is_preview=True,
                   help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
        c.argument('security_type', security_type)
        c.argument('enable_secure_boot', enable_secure_boot_type)
        c.argument('enable_vtpm', enable_vtpm_type)
        c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
        c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.')

    with self.argument_context('vm create', arg_group='Storage') as c:
        c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
        c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')

    with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
        c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together."")
        c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together."")

    with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c:
        c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."")
        c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."")

    with self.argument_context('vm open-port') as c:
        c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
        c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
        c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
        c.argument('port', help=""The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range."")
        c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
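    # Illustrative sketch of the flags registered above (hypothetical resource names; the
    # exact invocation is an assumption, not taken from this module):
    #   az vm open-port -g MyResourceGroup -n MyVm --port 80-100 --priority 900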

    for scope in ['vm show', 'vm list']:
        with self.argument_context(scope) as c:
            c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run slowly.')

    for scope in ['vm show', 'vmss show']:
        with self.argument_context(scope) as c:
            c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01')

    for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']:
        with self.argument_context(scope) as c:
            c.ignore('include_user_data')

    with self.argument_context('vm diagnostics') as c:
        c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])

    with self.argument_context('vm diagnostics set') as c:
        c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))

    with self.argument_context('vm install-patches') as c:
        c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)')
        c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.')
        c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.')
        c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.')
        c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only')
        c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only')
        c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help=""Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only"")
        c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
        c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
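    # Illustrative sketch of a patch installation run (hypothetical resource names; the long
    # flag names and the values IfRequired/Critical/Security are assumptions inferred from the
    # parameter dests and enums registered above):
    #   az vm install-patches -g MyResourceGroup -n MyWindowsVm --maximum-duration PT2H \
    #     --reboot-setting IfRequired --classifications-to-include-win Critical Security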

    with self.argument_context('vm disk') as c:
        c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
        c.argument('new', action='store_true', help='create a new disk')
        c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
        c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
        c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')

    with self.argument_context('vm disk attach') as c:
        c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
        c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
                   help=""The name or ID of the managed disk"", id_part='name',
                   completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
        c.argument('disks', nargs='*', help=""One or more names or IDs of the managed disk (space-delimited)."",
                   completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
        c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True))
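    # Illustrative sketch of attaching a brand-new managed disk (hypothetical names; the
    # invocation is an assumption based on the flags registered above):
    #   az vm disk attach -g MyResourceGroup --vm-name MyVm --name MyDataDisk --new --size-gb 128 --sku Premium_LRS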

    with self.argument_context('vm disk detach') as c:
        c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')

    with self.argument_context('vm encryption enable') as c:
        c.argument('encrypt_format_all', action='store_true', help='Encrypt-format data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. (Only supported for Linux virtual machines.)')
        # Place aad arguments in their own group
        aad_arguments = 'Azure Active Directory'
        c.argument('aad_client_id', arg_group=aad_arguments)
        c.argument('aad_client_secret', arg_group=aad_arguments)
        c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)

    with self.argument_context('vm extension') as c:
        c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
        c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
        c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))

    with self.argument_context('vm extension list') as c:
        c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)

    with self.argument_context('vm extension show') as c:
        c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.')

    with self.argument_context('vm secret') as c:
        c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. For example, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query ""[?attributes.enabled].id"" -o tsv\'')
        c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
        c.argument('certificate', help='key vault certificate name or its full secret URL')
        c.argument('certificate_store', help='Windows certificate store names. Default: My')

    with self.argument_context('vm secret list') as c:
        c.argument('vm_name', arg_type=existing_vm_name, id_part=None)

    with self.argument_context('vm image') as c:
        c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher')
        c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
        c.argument('offer', options_list=['--offer', '-f'], help='image offer')
        c.argument('plan', help='image billing plan')
        c.argument('sku', options_list=['--sku', '-s'], help='image sku')
        c.argument('version', help=""image sku's version"")
        c.argument('urn', help=""URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted"")
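    # Illustrative URN values matching the formats described above (example image names only):
    #   Canonical:UbuntuServer:18.04-LTS:latest
    #   Canonical:UbuntuServer:18.04-LTS:myEdgeZone:latest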

    with self.argument_context('vm image list') as c:
        c.argument('image_location', get_location_type(self.cli_ctx))
        c.argument('edge_zone', edge_zone_type)

    with self.argument_context('vm image list-offers') as c:
        c.argument('edge_zone', edge_zone_type)

    with self.argument_context('vm image list-skus') as c:
        c.argument('edge_zone', edge_zone_type)

    with self.argument_context('vm image list-publishers') as c:
        c.argument('edge_zone', edge_zone_type)

    with self.argument_context('vm image show') as c:
        c.argument('skus', options_list=['--sku', '-s'])
        c.argument('edge_zone', edge_zone_type)

    with self.argument_context('vm image terms') as c:
        c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
        c.argument('publisher', help='Image publisher')
        c.argument('offer', help='Image offer')
        c.argument('plan', help='Image billing plan')

    with self.argument_context('vm nic') as c:
        c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
        c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
        c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')

    with self.argument_context('vm nic show') as c:
        c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)

    with self.argument_context('vm unmanaged-disk') as c:
        c.argument('new', action='store_true', help='Create a new disk.')
        c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
        c.argument('vhd_uri', help=""Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd"")

    with self.argument_context('vm unmanaged-disk attach') as c:
        c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
        c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)

    with self.argument_context('vm unmanaged-disk detach') as c:
        c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')

    for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
        with self.argument_context(scope) as c:
            c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)

    with self.argument_context('vm unmanaged-disk list') as c:
        c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)

    with self.argument_context('vm user') as c:
        c.argument('username', options_list=['--username', '-u'], help='The user name')
        c.argument('password', options_list=['--password', '-p'], help='The user password')

    with self.argument_context('vm list-skus') as c:
        c.argument('size', options_list=['--size', '-s'], help=""size name, partial name is accepted"")
        c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help=""show skus supporting availability zones"")
        c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
                   help=""show all information including vm sizes not available under the current subscription"")
        c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. ""availabilitySets"", ""snapshots"", ""disks"", etc')

    with self.argument_context('vm restart') as c:
        c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')

    with self.argument_context('vm host') as c:
        c.argument('host_group_name', options_list=['--host-group'], id_part='name', help=""Name of the Dedicated Host Group"")
        c.argument('host_name', name_arg_type, id_part='child_name_1', help=""Name of the Dedicated Host"")
        c.ignore('expand')

    with self.argument_context('vm host create') as c:
        c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
                   help=""Fault domain of the host within a group. Allowed values: 0, 1, 2"")
        c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
                   help=""Replace the host automatically if a failure occurs"")
        c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
                   help=""The software license type that will be applied to the VMs deployed on the dedicated host."")
        c.argument('sku', help=""SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/"")
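    # Illustrative sketch of creating a dedicated host (hypothetical names; the SKU value and
    # the exact invocation are assumptions based on the flags registered above):
    #   az vm host create -g MyResourceGroup --host-group MyHostGroup -n MyHost --sku DSv3-Type1 --platform-fault-domain 0 --auto-replace true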

    with self.argument_context('vm host list') as c:
        c.argument('host_group_name', id_part=None)

    with self.argument_context('vm host group') as c:
        c.argument('host_group_name', name_arg_type, id_part='name', help=""Name of the Dedicated Host Group"")
        c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
                   help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
                        'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
                        'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
                        'false when not provided.')

    with self.argument_context('vm host group create') as c:
        c.argument('platform_fault_domain_count', options_list=[""--platform-fault-domain-count"", ""-c""], type=int,
                   help=""Number of fault domains that the host group can span."")
        c.argument('zones', zone_type)
        c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2022-03-01', help='Enable a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group.')
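    # Illustrative sketch of creating a dedicated host group (hypothetical names; the
    # --automatic-placement flag name is an assumption inferred from the dest above):
    #   az vm host group create -g MyResourceGroup -n MyHostGroup -c 2 --automatic-placement true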

    for scope in [""vm host"", ""vm host group""]:
        with self.argument_context(""{} create"".format(scope)) as c:
            location_type = get_location_type(self.cli_ctx)
            custom_location_msg = "" Otherwise, location will default to the resource group's location""
            custom_location_type = CLIArgumentType(overrides=location_type,
                                                   help=location_type.settings[""help""] + custom_location_msg)
            c.argument('location', arg_type=custom_location_type)
    # endregion

    # region VMSS
    scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']

    with self.argument_context('vmss') as c:
        c.argument('zones', zones_type, min_api='2017-03-30')
        c.argument('instance_id', id_part='child_name_1')
        c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
        c.argument('tags', tags_type)
        c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
        for dest in scaleset_name_aliases:
            c.argument(dest, vmss_name_type)
        c.argument('host_group', min_api='2020-06-01',
                   help='Name or ID of dedicated host group that the virtual machine scale set resides in')

    for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
        with self.argument_context(scope) as c:
            for dest in scaleset_name_aliases:
                c.argument(dest, vmss_name_type, id_part=None)  # due to instance-ids parameter

    with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c:
        VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)

        c.argument('name', name_arg_type)
        c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
        c.argument('single_placement_group', arg_type=get_three_state_flag(), help=""Limit the scale set to a single placement group.""
                   "" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details."")
        c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
        c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
        c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
        c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
        c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
        c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
        c.argument('vm_sku', help='Size of VMs in the scale set. Defaults to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
        c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
        c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
                   help=""The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set"")
        c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
        c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
        c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
                   arg_type=get_enum_type(['Uniform', 'Flexible']))
        c.argument('scale_in_policy', scale_in_policy_type)
        c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
                   help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
        c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.')
        c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
        c.argument('network_api_version', min_api='2021-03-01',
                   help=""Specify the Microsoft.Network API version used when creating networking resources in the Network ""
                        ""Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default ""
                        ""value is 2020-11-01."")
        c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature, where evicted VMSS SPOT instances will be opportunistically restored based on capacity availability and pricing constraints')
        c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
        c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
                   help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
        c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
                   help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
        c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
                   help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')
        c.argument('security_type', security_type)
        c.argument('enable_secure_boot', enable_secure_boot_type)
        c.argument('enable_vtpm', enable_vtpm_type)
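    # Illustrative sketch combining several of the flags registered above (hypothetical names;
    # the image alias is an assumption, not taken from this module):
    #   az vmss create -g MyResourceGroup -n MyScaleSet --image Ubuntu2204 --vm-sku Standard_DS1_v2 \
    #     --instance-count 3 --orchestration-mode Flexible --admin-username azureuser --generate-ssh-keys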

    with self.argument_context('vmss create', arg_group='Network Balancer') as c:
        LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
        c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify """" for none.', options_list=['--app-gateway'])
        c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
        c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
        c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
        c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
        c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
        c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify """" for none.', options_list=['--load-balancer', '--lb'])
        c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
                   help=""SKU of the load balancer to create. Defaults to 'Standard' when single placement group is turned off; otherwise, defaults to 'Basic'. The public IP can be created on an edge zone only when the SKU is 'Standard'"")
        c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])

    with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
        c.argument('public_ip_per_vm', action='store_true', help=""Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules"")
        c.argument('vm_domain_name', help=""domain name of VM instances, once configured, the FQDN is `vm..<..rest..>`"")
        c.argument('dns_servers', nargs='+', help=""space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6"")
        c.argument('accelerated_networking', arg_type=get_three_state_flag(),
                   help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"")

    with self.argument_context('vmss update') as c:
        protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group=""Protection Policy"", min_api='2019-03-01')
        c.argument('protect_from_scale_in', arg_type=protection_policy_type, help=""Protect the VM instance from scale-in operations."")
        c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help=""Protect the VM instance from scale set actions (including scale-in)."")
        c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
                   help='Enable terminate notification')
        c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
        c.argument('scale_in_policy', scale_in_policy_type)
        c.argument('force_deletion', action='store_true', is_preview=True, help='This property allows you to specify whether virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled in.')
        c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
        c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01',
                   help='Enable the Spot-Try-Restore feature, where evicted VMSS SPOT instances will be opportunistically restored based on capacity availability and pricing constraints')
        c.argument('spot_restore_timeout', min_api='2021-04-01',
                   help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
        c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Defaults to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
        c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
                   help='Only applicable when used with `--vm-sku`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
        c.argument('enable_secure_boot', enable_secure_boot_type)
        c.argument('enable_vtpm', enable_vtpm_type)

    with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:

        c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
        c.argument(
            'automatic_repairs_grace_period',
            help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
        )
        c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.')

    for scope in ['vmss create', 'vmss update']:
        with self.argument_context(scope) as c:
            c.argument('terminate_notification_time', min_api='2019-03-01',
                       help='Length of time (in minutes, between 5 and 15) for which a termination notification is sent to the VM on the instance metadata server before the VM gets deleted')
            c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01',
                       help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%')
            c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01',
                       help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%')
            c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01',
                       help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%')
            c.argument('pause_time_between_batches', min_api='2020-12-01',
                       help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds')
            c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01',
                       help='Setting this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size')
            c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01',
                       help='Setting this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances')
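    # Illustrative sketch of tuning the rolling-upgrade knobs above on an existing scale set
    # (hypothetical names; the long flag names are assumptions inferred from the dests):
    #   az vmss update -g MyResourceGroup -n MyScaleSet --max-batch-instance-percent 30 \
    #     --max-unhealthy-instance-percent 30 --prioritize-unhealthy-instances true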

    for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
        with self.argument_context(scope) as c:
            c.argument('instance_id', id_part='child_name_1', help=""{0} VM instance with this ID. If missing, {0} VMSS."".format(help_prefix))

    for scope in ['vmss update-instances', 'vmss delete-instances']:
        with self.argument_context(scope) as c:
            c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')

    with self.argument_context('vmss diagnostics') as c:
        c.argument('vmss_name', id_part=None, help='Scale set name')

    with self.argument_context('vmss disk') as c:
        options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
        new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)

        c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
        c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
        c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
        c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
                   min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
        c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
        c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')

    with self.argument_context('vmss encryption') as c:
        c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))

    with self.argument_context('vmss extension') as c:
        c.argument('extension_name', name_arg_type, help='Name of the extension.')
        c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)

    with self.argument_context('vmss nic') as c:
        c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
        c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
        c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')

    with self.argument_context('vmss nic list') as c:
        c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)

    with self.argument_context('vmss set-orchestration-service-state') as c:
        c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
        c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
    # endregion

    # region VM & VMSS Shared
    for scope in ['vm', 'vmss']:
        with self.argument_context(scope) as c:
            c.argument('no_auto_upgrade',
                       options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
                       arg_type=get_three_state_flag(),
                       help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')

        with self.argument_context('{} run-command'.format(scope)) as c:
            c.argument('command_id', completer=get_vm_run_command_completion_list, help=""The command id. Use 'az {} run-command list' to get the list"".format(scope))
            if scope == 'vmss':
                c.argument('vmss_name', vmss_name_type)

        with self.argument_context('{} run-command invoke'.format(scope)) as c:
            c.argument('parameters', nargs='+', help=""space-separated parameters in the format of '[name=]value'"")
            c.argument('scripts', nargs='+', help=""Space-separated script lines. Use @{file} to load script from a file"")
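        # Illustrative sketch of invoking a run command (hypothetical names; the command id
        # RunShellScript is an assumption, not taken from this module):
        #   az vm run-command invoke -g MyResourceGroup -n MyVm --command-id RunShellScript \
        #     --scripts 'echo hello' --parameters arg1=foo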

        with self.argument_context('{} stop'.format(scope)) as c:
            c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')

    run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.')
    run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine')
    for scope in ['create', 'update']:
        with self.argument_context('vm run-command {}'.format(scope)) as c:
            c.argument('vm_name', run_cmd_vm_name)
            c.argument('run_command_name', run_cmd_name_type)
            c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                       validator=get_default_location_from_resource_group)
            c.argument('tags', tags_type)
            c.argument('script', help='Contains the PowerShell or Bash script to execute on the VM.')
            c.argument('script_uri', help='Contains a URI to the script to execute on the VM. The URI can be any link accessible from the VM, or a storage blob without SAS. If the subscription has access to the storage blob, then a SAS will be auto-generated.')
            c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.')
            c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.')
            c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.')
            c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning '
                       'will complete as soon as the script starts and will not wait for script to complete.')
            c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.')
            c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ')
            c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.')
            c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.')
            c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.')

    with self.argument_context('vm run-command delete') as c:
        c.argument('vm_name', run_cmd_vm_name)
        c.argument('run_command_name', run_cmd_name_type)

    with self.argument_context('vm run-command list') as c:
        c.argument('vm_name', run_cmd_vm_name, id_part=None)
        c.argument('expand', help='The expand expression to apply on the operation.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))

    with self.argument_context('vm run-command show') as c:
        c.argument('vm_name', run_cmd_vm_name)
        c.argument('run_command_name', run_cmd_name_type)
        c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
        c.argument('instance_view', action='store_true', help='The instance view of a run command.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('command_id', help='The command id.')

    with self.argument_context('vm run-command wait') as c:
        c.argument('vm_name', run_cmd_vm_name)
        c.argument('run_command_name', run_cmd_name_type)
        c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
        c.argument('instance_view', action='store_true', help='The instance view of a run command.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('command_id', help='The command id.')

    run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.')
    for scope in ['create', 'update']:
        with self.argument_context('vmss run-command {}'.format(scope)) as c:
            c.argument('vmss_name', run_cmd_vmss_name)
            c.argument('instance_id', help='The instance ID of the virtual machine.')
            c.argument('run_command_name', run_cmd_name_type)
            c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                       validator=get_default_location_from_resource_group)
            c.argument('tags', tags_type)
            c.argument('script', help='Contains the PowerShell or Bash script to execute on the VM.')
            c.argument('script_uri',
                       help='Contains a URI to the script to execute on the VM. The URI can be any link accessible from the VM, or a storage blob without SAS. If the subscription has access to the storage blob, then a SAS will be auto-generated.')
            c.argument('command_id',
                       help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.')
            c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.')
            c.argument('protected_parameters', nargs='+',
                       help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.')
            c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning '
                                                                                'will complete as soon as the script starts and will not wait for script to complete.')
            c.argument('run_as_user',
                       help='By default script process runs under system/root user. Specify custom user to host the process.')
            c.argument('run_as_password',
                       help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ')
            c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.')
            c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.')
            c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.')

    with self.argument_context('vmss run-command delete') as c:
        c.argument('vmss_name', run_cmd_vmss_name)
        c.argument('instance_id', help='The instance ID of the virtual machine.')
        c.argument('run_command_name', run_cmd_name_type)

    with self.argument_context('vmss run-command list') as c:
        c.argument('vmss_name', run_cmd_vmss_name, id_part=None)
        c.argument('instance_id', help='The instance ID of the virtual machine.')
        c.argument('expand', help='The expand expression to apply on the operation.')

    with self.argument_context('vmss run-command show') as c:
        c.argument('vmss_name', run_cmd_vmss_name)
        c.argument('instance_id', help='The instance ID of the virtual machine.')
        c.argument('run_command_name', run_cmd_name_type)
        c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
        c.argument('instance_view', action='store_true', help='The instance view of a run command.')

    for scope in ['vm identity assign', 'vmss identity assign']:
        with self.argument_context(scope) as c:
            c.argument('assign_identity', options_list=['--identities'], nargs='*', help=""Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID))
            c.argument('vm_name', existing_vm_name)
            c.argument('vmss_name', vmss_name_type)

    for scope in ['vm identity remove', 'vmss identity remove']:
        with self.argument_context(scope) as c:
            c.argument('identities', nargs='+', help=""Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID))
            c.argument('vm_name', existing_vm_name)
            c.argument('vmss_name', vmss_name_type)

    for scope in ['vm identity show', 'vmss identity show']:
        with self.argument_context(scope) as c:
            c.argument('vm_name', existing_vm_name)
            c.argument('vmss_name', vmss_name_type)

    for scope in ['vm application set', 'vmss application set']:
        with self.argument_context(scope) as c:
            c.argument('vm', existing_vm_name)
            c.argument('vmss_name', vmss_name_type)
            c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help=""Space-separated application version ids to set to VM."")
            c.argument('order_applications', action='store_true', help='Whether to set an order index for each gallery application. If specified, the first app version ID is assigned order 1, the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version IDs must be installed in a particular order; the lowest order is installed first.')
            c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*',
                       help='Space-separated application configuration overrides for each application version ID. '
                       'It should have the same number of items as the application version IDs. Null can be used for an application '
                       'which does not have a configuration override.')
            c.argument('treat_deployment_as_failure', nargs='*', help=""Space-separated list of true or false values corresponding to the application version IDs. If set to true, any failure to install or update the gallery application version will fail this operation"")
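    # Illustrative sketch of assigning gallery applications (hypothetical names and a
    # placeholder version id; the --order-applications flag name is inferred from the dest):
    #   az vm application set -g MyResourceGroup -n MyVm --app-version-ids <gallery-application-version-id> --order-applications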

    for scope in ['vm application list', 'vmss application list']:
        with self.argument_context(scope) as c:
            c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
            c.argument('vmss_name', vmss_name_type, id_part=None)

    for scope in ['vm create', 'vmss create']:
        with self.argument_context(scope) as c:
            c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
            c.argument('tags', tags_type)
            c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
            c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
            c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
            c.argument('image', completer=get_urn_aliases_completion_list)
            c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
            c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ ""sourceVault"": { ""id"": ""value"" }, ""vaultCertificates"": [{ ""certificateUrl"": ""value"", ""certificateStore"": ""cert store name (only on windows)""}] }]`', type=file_type, completer=FilesCompleter())
            c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help=""Accept system or user assigned identities separated by spaces. Use '[system]' to refer to the system assigned identity, or a resource ID to refer to a user assigned identity. Check out help for more examples"")
            c.ignore('aux_subscriptions')
            c.argument('edge_zone', edge_zone_type)
            c.argument('accept_term', action='store_true', help=""Accept the license agreement and privacy statement."")
            c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.')

        with self.argument_context(scope, arg_group='Authentication') as c:
            c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
            c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
            c.argument('admin_password', help=""Password for the VM if authentication type is 'Password'."")
            c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
            c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value ""/home/username/.ssh/authorized_keys"" due to a known issue in Linux provisioning agent.')
            c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. ""all"" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))

        with self.argument_context(scope, arg_group='Storage') as c:
            if DiskStorageAccountTypes:
                allowed_values = "", "".join([sku.value for sku in DiskStorageAccountTypes])
            else:
                allowed_values = "", "".join(['Premium_LRS', 'Standard_LRS'])

            usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is ""os"" or a 0-indexed lun.'
            allowed_values = 'Allowed values: {}.'.format(allowed_values)
            storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
                               'or specify individual disks. {} {}'.format(usage, allowed_values)
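            # Illustrative --storage-sku forms per the usage string above (hypothetical LUNs):
            #   --storage-sku Premium_LRS
            #   --storage-sku os=Premium_LRS 0=Standard_LRS 1=Standard_LRS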

            c.argument('os_disk_name', help='The name of the new VM OS disk.')
            c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying a URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
            c.argument('storage_account', help=""Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created."")
            c.argument('storage_sku', nargs='+', help=storage_sku_help)
            c.argument('storage_container_name', help=""Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds"")
            c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
            c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
            c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
            c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
            c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
            c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
            c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
                       help=""storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `= =` to configure individual disk"")
            c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
            c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
                       help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
            c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
                       help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
            c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
            c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
                       help='Names or IDs (space delimited) of disk encryption sets for data disks.')
            c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
            c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
            c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
            c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
            c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01',
                       help='Specify the behavior of the managed disk when the VM gets deleted, i.e. whether the managed disk is deleted or detached.')
            c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)],
                       nargs='+', min_api='2021-03-01',
                       help='Specify whether the data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. If multiple data disks are attached, please use ""=Delete =Detach"" to configure each disk')
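        # Illustrative sketch of the storage flags above at VM creation time (hypothetical
        # names and sizes; the invocation is an assumption, not taken from this module):
        #   az vm create -g MyResourceGroup -n MyVm --image <urn-or-alias> --os-disk-size-gb 128 \
        #     --data-disk-sizes-gb 64 128 --storage-sku os=Premium_LRS 0=Standard_LRS 1=Standard_LRS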

        with self.argument_context(scope, arg_group='Network') as c:
            c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
            c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
            c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
            c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
            c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
            c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
            c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify """" for None (\'""""\' in Azure CLI using PowerShell or --% operator).')
            c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
            c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
            if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
                PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
                c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP can be created on an edge zone only when the SKU is \'Standard\'',
                           default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
            c.argument('nic_delete_option', nargs='+', min_api='2021-03-01',
                       help='Specify what happens to the network interface when the VM is deleted. Use a singular '
                       'value to apply on all resources, or use <Name>=<Value> to configure '
                       'the delete behavior for individual resources. Possible options are Delete and Detach.')

        with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
            c.argument('plan_name', help='plan name')
            c.argument('plan_product', help='plan product')
            c.argument('plan_publisher', help='plan publisher')
            c.argument('plan_promotion_code', help='plan promotion code')

    for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
        with self.argument_context(scope) as c:
            arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
            c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group,
                       help=""Scope that the system assigned identity can access. "")
            c.ignore('identity_role_id')

    for scope in ['vm create', 'vmss create']:
        with self.argument_context(scope) as c:
            c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity',
                       help='Role name or id the system assigned identity will have. ')

    for scope in ['vm identity assign', 'vmss identity assign']:
        with self.argument_context(scope) as c:
            c.argument('identity_role', options_list=['--role'], help=""Role name or id the system assigned identity will have"")

    with self.argument_context('vm auto-shutdown') as c:
        c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
        c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
        c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
        c.argument('webhook', help='The webhook URL to which the notification will be sent')
        c.argument('location', validator=get_default_location_from_resource_group)
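        # Illustrative usage (example values only; flags map to the arguments registered above):
        #   az vm auto-shutdown -g MyResourceGroup -n MyVm --time 1730 --email admin@contoso.com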

    for scope in ['vm diagnostics', 'vmss diagnostics']:
        with self.argument_context(scope) as c:
            c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
            c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
            c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
            c.argument('is_windows_os', action='store_true', help='for Windows VMs')

    for scope in ['vm encryption', 'vmss encryption']:
        with self.argument_context(scope) as c:
            c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
            c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
            c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
            c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
            c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
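            # Illustrative usage (example names only). If --key-encryption-keyvault is omitted, the CLI falls
            # back to --disk-encryption-keyvault, as noted above:
            #   az vm encryption enable -g MyResourceGroup -n MyVm --disk-encryption-keyvault MyVault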

    for scope in ['vm extension', 'vmss extension']:
        with self.argument_context(scope) as c:
            c.argument('publisher', help='The name of the extension publisher.')
            c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
            c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
            c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
            c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
                       help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')

    with self.argument_context('vm extension set') as c:
        c.argument('vm_extension_name', name_arg_type,
                   completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
                   help='Name of the extension.', id_part=None)
        c.argument('force_update', action='store_true', help='Force the update even if the extension configuration has not changed.')
        c.argument('extension_instance_name', extension_instance_name_type)

    with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
        c.argument('force_update', action='store_true', help='Force the update even if the extension configuration has not changed.')
        c.argument('extension_instance_name', extension_instance_name_type)
        c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')

    for scope in ['vm extension image', 'vmss extension image']:
        with self.argument_context(scope) as c:
            c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
            c.argument('name', help='Image name', id_part=None)
            c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
            c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
            c.argument('latest', action='store_true', help='Show the latest version only.')
            c.argument('version', help='Extension version')
            c.argument('orderby', help=""the $orderby odata query option"")
            c.argument('top', help='the $top odata query option')

    for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
        with self.argument_context(scope) as c:
            c.argument('license_type', license_type)
            c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
                       arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
                       help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."")
            c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
                       help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
            c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'],
                       help='The ID or name of the capacity reservation group that is used to allocate. Pass in ""None"" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.',
                       min_api='2021-04-01', is_preview=True)
            c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available')
            c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.')
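            # Illustrative usage (example values only): deploy a Spot VM that is not evicted for price reasons:
            #   az vm create -g MyResourceGroup -n MySpotVm --image UbuntuLTS --priority Spot --max-price -1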

    with self.argument_context('vm update') as c:
        c.argument('license_type', license_type)
        c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
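        # Illustrative usage (example values only): pass user data from a local file:
        #   az vm update -g MyResourceGroup -n MyVm --user-data ./cloud-init.txt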

    with self.argument_context('vmss create') as c:
        c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
                   arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
                   help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."")

    with self.argument_context('sig') as c:
        c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
        c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
        c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')

    for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
        with self.argument_context(scope) as c:
            c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
            c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')

    with self.argument_context('sig show') as c:
        c.argument('select', help='The select expression to apply on the operation.')
        c.argument('sharing_groups', action='store_true', help='The expand query option to query shared gallery groups')

    with self.argument_context('sig list-shared') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('shared_to', shared_to_type)

    with self.argument_context('sig show-shared') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
        c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
                   id_part='child_name_1')

    for scope in ['sig share add', 'sig share remove']:
        with self.argument_context(scope) as c:
            c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
            c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
            c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')

    with self.argument_context('sig share add') as c:
        c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True),
                   help='distinguish add operation and remove operation')

    with self.argument_context('sig share remove') as c:
        c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True),
                   help='distinguish add operation and remove operation')

    with self.argument_context('sig share reset') as c:
        c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
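        # Illustrative usage (example values only): clear all tenant/subscription sharing on a gallery:
        #   az sig share reset -g MyResourceGroup --gallery-name MyGallery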

    with self.argument_context('sig image-definition create') as c:
        c.argument('offer', options_list=['--offer', '-f'], help='image offer')
        c.argument('sku', options_list=['--sku', '-s'], help='image sku')
        c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
        c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
        c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help=""This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'."")
        c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
        c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
        c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
        c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
        c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')

        c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
        c.argument('plan_name', help='plan name', arg_group='Purchase plan')
        c.argument('plan_product', help='plan product', arg_group='Purchase plan')

        c.argument('eula', help='The Eula agreement for the gallery image')
        c.argument('privacy_statement_uri', help='The privacy statement uri')
        c.argument('release_note_uri', help='The release note uri')
        c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"")
        c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
        c.argument('features', help='A list of gallery image features. E.g. ""IsSecureBootSupported=true IsMeasuredBootSupported=false""')
        c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.')

    with self.argument_context('sig image-definition list-shared') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
        c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
                   id_part='child_name_1')
        c.argument('shared_to', shared_to_type)
        c.argument('marker', arg_type=marker_type)
        c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')

    with self.argument_context('sig image-definition show-shared') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
        c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
                   id_part='child_name_1')
        c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
                   'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
                   id_part='child_name_2')

    with self.argument_context('sig create') as c:
        c.argument('description', help='the description of the gallery')
        c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
                   min_api='2020-09-30', is_experimental=True,
                   help='This property allows you to specify the permission of sharing gallery.')
        c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
                   help='Enable soft-deletion for resources in this gallery, '
                        'allowing them to be recovered within retention time.')
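        # Illustrative usage (example values only): create a gallery with soft-deletion enabled:
        #   az sig create -g MyResourceGroup --gallery-name MyGallery --soft-delete true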
    with self.argument_context('sig update') as c:
        c.ignore('gallery')
        c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
                   min_api='2020-09-30', is_experimental=True,
                   help='This property allows you to specify the permission of sharing gallery.')
        c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
                   help='Enable soft-deletion for resources in this gallery, '
                        'allowing them to be recovered within retention time.')
    with self.argument_context('sig image-definition create') as c:
        c.argument('description', help='the description of the gallery image definition')
    with self.argument_context('sig image-definition update') as c:
        c.ignore('gallery_image')

    with self.argument_context('sig image-version') as c:
        deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration=""3.0.0"")
        c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
                   help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')

    with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c:
        c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
                   help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
        c.argument('description', help='the description of the gallery image version')
        c.argument('managed_image', help='image name (if in the same resource group) or resource id')
        c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
        c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
        c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
        c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed with the image version omitted will not use this image version.')
        c.argument('version', help='image version')
        c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"")
        c.argument('storage_account_type', help=""The default storage account type to be used per region. To set regional storage account types, use --target-regions"",
                   arg_type=get_enum_type([""Standard_LRS"", ""Standard_ZRS"", ""Premium_LRS""]), min_api='2019-03-01')
        c.argument('target_region_encryption', nargs='+',
                   help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use ""null"" as a placeholder.')
        c.argument('os_vhd_uri', help='Source VHD URI of OS disk')
        c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk')
        c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks')
        c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks')
        c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks')
        c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.')
        c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `<os_cvm_encryption_type>,<os_cvm_des>`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.')
        c.argument('virtual_machine', help='Resource id of VM source')
        c.argument('image_version', help='Resource id of gallery image version source')

    with self.argument_context('sig image-version list-shared') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
        c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
                   id_part='child_name_1')
        c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
                   'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
                   id_part='child_name_2')
        c.argument('shared_to', shared_to_type)
        c.argument('marker', arg_type=marker_type)
        c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')

    with self.argument_context('sig image-version show') as c:
        c.argument('expand', help=""The expand expression to apply on the operation, e.g. 'ReplicationStatus'"")

    with self.argument_context('sig image-version show-shared') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
        c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
                   id_part='child_name_1')
        c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
                   'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
                   id_part='child_name_2')
        c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The '
                   'name of the gallery image version to be created. Needs to follow semantic version name pattern: '
                   'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. '
                   'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')

    for scope in ['sig image-version create', 'sig image-version update']:
        with self.argument_context(scope) as c:
            c.argument('target_regions', nargs='*',
                       help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
                            'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
            c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
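            # Illustrative usage (example values only): per-region replica count and storage account type overrides:
            #   az sig image-version create -g MyResourceGroup -r MyGallery -i MyImageDef -e 1.0.0
            #       --managed-image MyImage --target-regions westus=2=standard_lrs eastus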
    # endregion

    # region Gallery applications
    with self.argument_context('sig gallery-application') as c:
        c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'],
                   help='The name of the gallery Application')

    with self.argument_context('sig gallery-application create') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('description', help='The description of this gallery Application Definition resource. '
                   'This property is updatable.')
        c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you '
                   'to specify the supported type of the OS that application is built for. <br><br> Possible values '
                   'are: <br><br> **Windows** <br><br> **Linux**')

    with self.argument_context('sig gallery-application update') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('description', help='The description of this gallery Application Definition resource. '
                   'This property is updatable.')

    with self.argument_context('sig gallery-application version') as c:
        c.argument('gallery_application_name', options_list=['--application-name'],
                   help='The name of the gallery Application')
        c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'],
                   help='The name of the gallery Application Version')

    for scope in ['create', 'update']:
        with self.argument_context('sig gallery-application version {}'.format(scope)) as c:
            c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                       validator=get_default_location_from_resource_group)
            c.argument('tags', tags_type)
            c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.')
            c.argument('install_command', help='The path and arguments to install the gallery application.')
            c.argument('remove_command', help='The path and arguments to remove the gallery application.')
            c.argument('update_command', help='The path and arguments to update the gallery application. If not present,'
                       ' then update operation will invoke remove command on the previous version '
                       'and install command on the current version of the gallery application.')
            c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is '
                       'going to be replicated to. This property is updatable. Expected value: '
                       'json-string/json-file/@json-file.')
            c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.')
            c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines '
                       'deployed from the latest version of the Image Definition won\'t use this Image Version.',
                       arg_group='Publishing Profile')
            c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be '
                       'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile')
    # endregion

    # region Proximity Placement Group
    with self.argument_context('ppg', min_api='2018-04-01') as c:
        c.argument('proximity_placement_group_name', arg_type=name_arg_type, help=""The name of the proximity placement group."")

    with self.argument_context('ppg create', min_api='2018-04-01') as c:
        c.argument('ppg_type', options_list=['--type', '-t'], help=""The type of the proximity placement group. Allowed values: Standard."")
        c.argument('tags', tags_type)

    with self.argument_context('ppg show', min_api='2019-07-01') as c:
        c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')

    for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
                        ('vm availability-set create', 'availability set'),
                        ('vm update', 'VM'), ('vmss update', 'VMSS'),
                        ('vm availability-set update', 'availability set')]:
        with self.argument_context(scope, min_api='2018-04-01') as c:
            c.argument('proximity_placement_group', options_list=['--ppg'],
                       help=""The name or ID of the proximity placement group the {} should be associated with."".format(item),
                       validator=_validate_proximity_placement_group)  # only availability set does not have a command level validator, so this should be added.
    # endregion

    # region VM Monitor
    with self.argument_context('vm monitor log show') as c:
        c.argument('analytics_query', options_list=['--analytics-query', '-q'], help=""Query to execute over Log Analytics data."")
        c.argument('timespan', help=""Timespan over which to query. Defaults to querying all available data."")

    with self.argument_context('vm monitor metrics') as c:
        c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.')

    with self.argument_context('vm monitor metrics tail') as c:
        from azure.mgmt.monitor.models import AggregationType
        c.extra('resource_group_name', required=True)
        c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
        c.argument('metadata', action='store_true')
        c.argument('dimension', nargs='*', validator=validate_metric_dimension)
        c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
        c.argument('metrics', nargs='*')
        c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
        c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
        c.argument('filters', options_list=['--filter'])
        c.argument('metric_namespace', options_list=['--namespace'])

    with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
        c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
        c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
        c.argument('offset', type=get_period_type(as_timedelta=True))
        c.argument('interval', arg_group='Time', type=get_period_type())

    with self.argument_context('vm monitor metrics list-definitions') as c:
        c.extra('resource_group_name', required=True)
        c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
    # endregion

    # region disk encryption set
    with self.argument_context('disk-encryption-set') as c:
        c.argument('disk_encryption_set_name', disk_encryption_set_name)
        c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
        c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
        c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
                   help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
        c.argument('location', validator=get_default_location_from_resource_group)
        c.argument('tags', tags_type)
        c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01',
                   options_list=['--enable-auto-key-rotation', '--auto-rotation'],
                   help='Enable automatic rotation of keys.')
    # endregion

    # region DiskAccess
    with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
        c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
        c.argument('location', validator=get_default_location_from_resource_group)
        c.argument('tags', tags_type)
    # endRegion

    # region Capacity
    with self.argument_context('capacity reservation group') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
        c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'],
                   help='The name of the capacity reservation group.')
        c.argument('tags', tags_type)

    with self.argument_context('capacity reservation group create') as c:
        c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.')

    with self.argument_context('capacity reservation group show') as c:
        c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.')

    with self.argument_context('capacity reservation group list') as c:
        c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.')
        c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.')

    with self.argument_context('capacity reservation') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
        c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'],
                   help='The name of the capacity reservation group.')
        c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'],
                   help='The name of the capacity reservation.')
        c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.')
        c.argument('tags', tags_type)

    with self.argument_context('capacity reservation create') as c:
        c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part of the list of zones specified during the capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in the same zone.')
        c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs to be reserved. Currently VM Skus with the capability called ""CapacityReservationSupported"" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.')

    with self.argument_context('capacity reservation show') as c:
        c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.')
    # endRegion

    # region Restore point
    with self.argument_context('restore-point') as c:
        c.argument('restore_point_collection_name', options_list=['--collection-name'],
                   help='The name of the restore point collection.')

    with self.argument_context('restore-point create') as c:
        c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
                   help='The name of the restore point.')
        c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the '
                   'customer wishes to exclude from the restore point. If no disks are specified, all disks will be '
                   'included.')
        c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created')

    with self.argument_context('restore-point show') as c:
        c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
                   help='The name of the restore point.')
        c.argument('expand', help='The expand expression to apply on the operation.',
                   deprecate_info=c.deprecate(hide=True))
        c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.')

    with self.argument_context('restore-point delete') as c:
        c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
                   help='The name of the restore point.')

    with self.argument_context('restore-point wait') as c:
        c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
                   help='The name of the restore point.')
    # endRegion

    # region Restore point collection
    with self.argument_context('restore-point collection create') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('tags', tags_type)
        c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection',
                   arg_group='Source')

    with self.argument_context('restore-point collection update') as c:
        c.argument('tags', tags_type)

    with self.argument_context('restore-point collection show') as c:
        c.argument('expand', help='The expand expression to apply on the operation.',
                   deprecate_info=c.deprecate(hide=True))
        c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.')
","def load_arguments(self, _):
    # Model imports
    DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
    SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
    UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
    HyperVGenerationTypes = self.get_models('HyperVGenerationTypes')
    DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
    OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
    RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
    GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
    ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions')

    # REUSABLE ARGUMENT DEFINITIONS
    name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
    multi_ids_type = CLIArgumentType(nargs='+')
    existing_vm_name = CLIArgumentType(overrides=name_arg_type,
                                       configured_default='vm',
                                       help=""The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`"",
                                       completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
    existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
    existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
    vmss_name_type = CLIArgumentType(name_arg_type,
                                     configured_default='vmss',
                                     completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
                                     help=""Scale set name. You can configure the default using `az configure --defaults vmss=<name>`"",
                                     id_part='name')
    extension_instance_name_type = CLIArgumentType(help=""Name of extension instance, which can be customized. Default: name of the extension."")
    image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
    disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
    ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01')

    license_type = CLIArgumentType(
        help=""Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for ""
             ""Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, ""
             ""use 'Windows_Client'. For more information see the Azure Windows VM online docs."",
        arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE',
                                'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA',
                                'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC', 'None', 'RHEL_ELS_6']))

    # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
    DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes')

    if DiskStorageAccountTypes:
        disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
    else:
        # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.
        # However, 2017-03-09-profile targets version 2016-03-30 of compute package.
        disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))

    if SnapshotStorageAccountTypes:
        snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
    else:
        # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.
        # However, 2017-03-09-profile targets version 2016-03-30 of compute package.
        snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))

    # special case for `network nic scale-set list` command alias
    with self.argument_context('network nic scale-set list') as c:
        c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')

    HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks')
    if HyperVGenerationTypes:
        hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default=""V1""))
    else:
        hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type([""V1"", ""V2""], default=""V1""))

    ultra_ssd_enabled_type = CLIArgumentType(
        arg_type=get_three_state_flag(), min_api='2018-06-01',
        help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')

    scale_in_policy_type = CLIArgumentType(
        nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
        help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
    )

    edge_zone_type = CLIArgumentType(
        help='The name of edge zone.',
        min_api='2020-12-01',
        is_preview=True
    )

    t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
    shared_to_type = CLIArgumentType(
        arg_type=get_enum_type(t_shared_to),
        help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
             'If not specified, list by subscription id.'
    )

    marker_type = CLIArgumentType(
        help='A string value that identifies the portion of the list of containers to be '
             'returned with the next listing operation. The operation returns the NextMarker value within '
             'the response body if the listing operation did not return all containers remaining to be listed '
             'with the current page. If specified, this generator will begin returning results from the point '
             'where the previous generator stopped.')

    enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.')
    enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.')
    security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')

    # region MixedScopes
    for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
        with self.argument_context(scope) as c:
            c.argument('tags', tags_type)

    for scope in ['disk', 'snapshot']:
        with self.argument_context(scope) as c:
            c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
            c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
            c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
            c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
            if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
                c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
                c.argument('for_upload', arg_type=get_three_state_flag(),
                           help='Create the {0} for uploading blobs later on through storage commands. Run ""az {0} grant-access --access-level Write"" to retrieve the {0}\'s SAS token.'.format(scope))
                c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
            else:
                c.ignore('access_level', 'for_upload', 'hyper_v_generation')
            c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')),
                       help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
            c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
            c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
            operation_group = 'disks' if scope == 'disk' else 'snapshots'
            c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
            c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
            c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.')
            c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.')
            c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image supports it.')

    for scope in ['disk create', 'snapshot create']:
        with self.argument_context(scope) as c:
            c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
    # endregion

    # region Disks
    with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
        c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone'])  # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
        c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
        c.argument('name', arg_type=name_arg_type)
        c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
        c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
        c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
        c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help=""The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10"")
        c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
                   help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
        c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
        c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
        c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
        c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
        c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
        c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk')
        c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
        c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.')
        c.argument('tier', help='Performance tier of the disk (e.g., P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.')
        c.argument('edge_zone', edge_zone_type)
        c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
        c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
        c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.')
        c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.')
    # endregion

    # region Snapshots
    with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
        c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
        c.argument('name', arg_type=name_arg_type)
        c.argument('sku', arg_type=snapshot_sku)
        c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
                   help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
        c.argument('edge_zone', edge_zone_type)
        c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01',
                   help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.')
        c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.')
    # endregion

    # region Images
    with self.argument_context('image') as c:
        c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
        c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
        c.argument('tags', tags_type)

    with self.argument_context('image create') as c:
        # here we collapse all different image sources to under 2 common arguments --os-disk-source --data-disk-sources
        c.argument('name', arg_type=name_arg_type, help='new image name')
        c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
        c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
        c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
                   'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
        c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
        c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's OS disk."")
        c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's data disk."")
        c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api=""2019-03-01"", help='The hypervisor generation of the Virtual Machine created from the image.')
        c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
        c.argument('edge_zone', edge_zone_type, )
    # endregion

    # region Image Templates
    with self.argument_context('image builder') as c:
        ib_output_name_help = ""Name of the image builder run output.""

        c.argument('location', get_location_type(self.cli_ctx))
        c.argument('scripts', nargs='+', help=""Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL.""
                   "" Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'"")
        c.argument('source', options_list=[""--image-source"", ""-i""], help=""The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID."")
        c.argument('image_template_name', image_template_name_type, help=""The name of the image template."")
        c.argument('checksum', help=""The SHA256 checksum of the Red Hat ISO image"")
        c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g ""image_1=westus2 image_2=westus"". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
        c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g ""my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth."" '
                   'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a ""/"". Each value is a comma-delimited list of replica locations.')
        c.argument('output_name', help=ib_output_name_help)
        c.ignore('destinations_lists', 'scripts_list', 'source_dict')

    with self.argument_context('image builder create') as c:
        ib_source_type = CLIArgumentType(arg_group=""Image Source"")
        ib_customizer_type = CLIArgumentType(arg_group=""Customizer"")
        ib_cutput_type = CLIArgumentType(arg_group=""Output"")

        c.argument('build_timeout', type=int, help=""The Maximum duration to wait while building the image template, in minutes. Default is 60."")
        c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json')
        c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')

        # VM profile
        c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
        c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
        c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
        c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
        c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).')
        c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.')

        # Image Source Arguments
        c.argument('source', arg_type=ib_source_type)
        c.argument('checksum', arg_type=ib_source_type)
        c.argument('', arg_type=ib_source_type)

        # Image Customizer Arguments
        c.argument('scripts', arg_type=ib_customizer_type)
        c.argument('', arg_type=ib_customizer_type)
        c.argument('', arg_type=ib_customizer_type)

        # Image Output Arguments
        c.argument('managed_image_destinations', arg_type=ib_cutput_type)
        c.argument('shared_image_destinations', arg_type=ib_cutput_type)
        c.argument('output_name', arg_type=ib_cutput_type)

    with self.argument_context('image builder output') as c:
        ib_sig_regions_help = ""Space-separated list of regions to replicate the image version into.""
        ib_img_location_help = ""Location where the customized image will be created.""

        c.argument('gallery_image_definition', arg_group=""Shared Image Gallery"", help=""Name or ID of the existing SIG image definition to create the customized image version with."")
        c.argument('gallery_name', arg_group=""Shared Image Gallery"", help=""Shared image gallery name, if image definition name and not ID was provided."")
        c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help)
        c.argument('managed_image', arg_group=""Managed Image"", help=""Name or ID of the customized managed image to be created."")
        c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help)

    with self.argument_context('image builder output add') as c:
        ib_artifact_tags_help = ""Tags that will be applied to the output artifact once it has been created by the distributor. "" + tags_type.settings['help']
        ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=[""--artifact-tags""])
        ib_default_loc_help = "" Defaults to resource group's location.""

        c.argument('output_name', help=ib_output_name_help + "" Defaults to the name of the managed image or sig image definition."")
        c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
        c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help + ib_default_loc_help)
        c.argument('is_vhd', arg_group=""VHD"", help=""The output is a VHD distributor."", action='store_true')
        c.argument('tags', arg_type=ib_artifact_tags_type)
        c.ignore('location')

    with self.argument_context('image builder customizer') as c:
        ib_win_restart_type = CLIArgumentType(arg_group=""Windows Restart"")
        ib_win_update_type = CLIArgumentType(arg_group=""Windows Update"")
        ib_script_type = CLIArgumentType(arg_group=""Shell and Powershell"")
        ib_powershell_type = CLIArgumentType(arg_group=""Powershell"")
        ib_file_customizer_type = CLIArgumentType(arg_group=""File"")

        c.argument('customizer_name', help=""Name of the customizer."")
        c.argument('customizer_type', options_list=['--type', '-t'], help=""Type of customizer to be added to the image template."", arg_type=get_enum_type(ScriptType))

        # Script Args
        c.argument('script_url', arg_type=ib_script_type, help=""URL of script to customize the image with. The URL must be publicly accessible."")
        c.argument('inline_script', arg_type=ib_script_type, nargs='+', help=""Space-separated list of inline script lines to customize the image with."")

        # Powershell Specific Args
        c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help=""Space-separated list of valid exit codes, as integers"")

        # Windows Restart Specific Args
        c.argument('restart_command', arg_type=ib_win_restart_type, help=""Command to execute the restart operation."")
        c.argument('restart_check_command', arg_type=ib_win_restart_type, help=""Command to verify that restart succeeded."")
        c.argument('restart_timeout', arg_type=ib_win_restart_type, help=""Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)"", default=""5m"")

        # Windows Update Specific Args
        c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
        c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
        c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')

        # File Args
        c.argument('file_source', arg_type=ib_file_customizer_type, help=""The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc."")
        c.argument('dest_path', arg_type=ib_file_customizer_type, help=""The absolute destination path where the file specified in --file-source will be downloaded to in the image"")
    # endregion

    # region AvailabilitySets
    with self.argument_context('vm availability-set') as c:
        c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')

    with self.argument_context('vm availability-set create') as c:
        c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
        c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
        c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
        c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
        c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')

    with self.argument_context('vm availability-set update') as c:
        if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
            c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
            c.argument('availability_set_name', options_list=['--availability-set-name'])
    # endregion

    # region VirtualMachines
    with self.argument_context('vm') as c:
        c.argument('vm_name', existing_vm_name)
        c.argument('size', completer=get_vm_size_completion_list)
        c.argument('name', arg_type=name_arg_type)
        c.argument('zone', zone_type, min_api='2017-03-30')
        c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
        c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none.', arg_group='Network')
        c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
        c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
        c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')

    with self.argument_context('vm capture') as c:
        c.argument('overwrite', action='store_true')

    with self.argument_context('vm update') as c:
        c.argument('os_disk', min_api='2017-12-01', help=""Managed OS disk ID or name to swap to"")
        c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
                   help=""enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2"")
        c.argument('disk_caching', nargs='*', help=""Use singular value to apply across, or specify individual disks, e.g.
'os=ReadWrite 0=None 1=ReadOnly' to update the OS disk and 2 data disks"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none (\'""""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine"") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
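        # The guest agent and in-guest patching flags registered below are interdependent (see the --patch-mode help text).
        # A sketch of one possible invocation only; the resource group, VM name and image alias are hypothetical placeholders:
        #   az vm create -g my-rg -n my-win-vm --image Win2022Datacenter \
        #       --enable-agent true --enable-auto-update true --patch-mode AutomaticByPlatform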
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. 
Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together."") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help=""The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range."") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. 
It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help=""Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only"") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help=""The name or ID of the managed disk"", id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('disks', nargs='*', help=""One or more names or IDs of the managed disk (space-delimited)."", completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm extension show') as c: c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query ""[?attributes.enabled].id"" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help=""image sku's version"") c.argument('urn', help=""URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted"") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. 
If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help=""Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd"") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help=""size name, partial name is accepted"") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help=""show skus supporting availability zones"") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help=""show all information including vm sizes not available under the current subscription"") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. ""availabilitySets"", ""snapshots"", ""disks"", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help=""Name of the Dedicated Host Group"") c.argument('host_name', name_arg_type, id_part='child_name_1', help=""Name of the Dedicated Host"") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help=""Fault domain of the host within a group. 
Allowed values: 0, 1, 2"") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help=""Replace the host automatically if a failure occurs"") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help=""The software license type that will be applied to the VMs deployed on the dedicated host."") c.argument('sku', help=""SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/"") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help=""Name of the Dedicated Host Group"") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=[""--platform-fault-domain-count"", ""-c""], type=int, help=""Number of fault domains that the host group can span."") c.argument('zones', zone_type) c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2022-03-01', help='Enable a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group.') for scope in [""vm host"", ""vm host group""]: with self.argument_context(""{} create"".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = "" Otherwise, location will default to the resource group's location"" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings[""help""] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. 
Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help=""Limit the scale set to a single placement group."" "" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details."") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set"") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help=""Specify the Microsoft.Network API version used when creating networking resources in the Network "" ""Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. 
Default "" ""value is 2020-11-01."") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify """" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. 
Can also reference an existing load balancer by ID or specify """" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help=""Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'"") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help=""Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules"") c.argument('vm_domain_name', help=""domain name of VM instances, once configured, the FQDN is `vm..<..rest..>`"") c.argument('dns_servers', nargs='+', help=""space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6"") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group=""Protection Policy"", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help=""Protect the VM instance from scale-in operations."") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help=""Protect the VM instance from scale set actions (including scale-in)."") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--vm-sku`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Setting this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Setting this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help=""{0} VM instance with this ID. If missing, {0} VMSS."".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help=""The command id. Use 'az {} run-command list' to get the list"".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help=""space-separated parameters in the format of '[name=]value'"") c.argument('scripts', nargs='+', help=""Space-separated script lines. 
Use @{file} to load script from a file"") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine') for scope in ['create', 'update']: with self.argument_context('vm run-command {}'.format(scope)) as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.') c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') with self.argument_context('vm run-command delete') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vm run-command list') as c: c.argument('vm_name', run_cmd_vm_name, id_part=None) c.argument('expand', help='The expand expression to apply on the operation.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('vm run-command show') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') with self.argument_context('vm run-command wait') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') for scope in ['create', 'update']: with self.argument_context('vmss run-command {}'.format(scope)) as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.') c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.') with self.argument_context('vmss run-command delete') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vmss run-command list') as c: c.argument('vmss_name', run_cmd_vmss_name, id_part=None) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('expand', help='The expand expression to apply on the operation.') with self.argument_context('vmss run-command show') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help=""Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help=""Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm application set', 'vmss application set']: with self.argument_context(scope) as c: c.argument('vm', existing_vm_name) c.argument('vmss_name', vmss_name_type) c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help=""Space-separated application version ids to set to VM."") c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery application. If specified, the first app version id gets specified an order = 1, then the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; the lowest order is installed first.') c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', help='Space-separated application configuration overrides for each application version ids. ' 'It should have the same number of items as the application version ids. Null is available for a application ' 'which does not have a configuration override.') c.argument('treat_deployment_as_failure', nargs='+', help=""Space-separated list of true or false corresponding to the application version ids. 
If set to true, failure to install or update gallery application version operation will fail this operation"") for scope in ['vm application list', 'vmss application list']: with self.argument_context(scope) as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) c.argument('vmss_name', vmss_name_type, id_part=None) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ ""sourceVault"": { ""id"": ""value"" }, ""vaultCertificates"": [{ ""certificateUrl"": ""value"", ""certificateStore"": ""cert store name (only on windows)""}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help=""accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples"") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) c.argument('accept_term', action='store_true', help=""Accept the license agreement and privacy statement."") c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.') with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help=""Password for the VM if authentication type is 'Password'."") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value ""/home/username/.ssh/authorized_keys"" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. 
Defaults to password for Windows and SSH public key for Linux. ""all"" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = "", "".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = "", "".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is ""os"" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. {} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help=""Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created."") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help=""Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds"") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help=""storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `= =` to configure individual disk"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. 
Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. For multiple data disks are attached, please use ""=Delete =Detach"" to configure each disk') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify """" for None (\'""""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. 
The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. Use a singular ' 'value to apply on all resources, or use = to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help=""Scope that the system assigned identity can access. "") c.ignore('identity_role_id') for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', help='Role name or id the system assigned identity will have. ') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], help=""Role name or id the system assigned identity will have"") with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. 
If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help=""the $orderby odata query option"") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('license_type', license_type) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in ""None"" to disassociate the capacity reservation group. 
Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.') with self.argument_context('vm update') as c: c.argument('license_type', license_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig show') as c: c.argument('select', help='The select expression to apply on the operation.') c.argument('sharing_groups', action='store_true', help='The expand query option to query shared gallery groups') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig 
image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help=""This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'."") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. 
""IsSecureBootSupported=true IsMeasuredBootSupported=false""') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image definition') with self.argument_context('sig image-definition update') as c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration=""3.0.0"") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `..`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. 
`..`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('storage_account_type', help=""The default storage account type to be used per region. To set regional storage account types, use --target-regions"", arg_type=get_enum_type([""Standard_LRS"", ""Standard_ZRS"", ""Premium_LRS""]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `,,,,`. Use ""null"" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.') c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `,`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.') c.argument('virtual_machine', help='Resource id of VM source') c.argument('image_version', help='Resource id of gallery image version source') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-version show') as c: c.argument('expand', help=""The expand expression to apply on the operation, e.g. 
'ReplicationStatus'"") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: ..', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', help='Space-separated list of regions and their replica counts. Use `[=][=]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Gallery applications with self.argument_context('sig gallery-application') as c: c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], help='The name of the gallery Application') with self.argument_context('sig gallery-application create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' 'to specify the supported type of the OS that application is built for.
Possible values ' 'are: **Windows**
**Linux**') with self.argument_context('sig gallery-application update') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') with self.argument_context('sig gallery-application version') as c: c.argument('gallery_application_name', options_list=['--application-name'], help='The name of the gallery Application') c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], help='The name of the gallery Application Version') for scope in ['create', 'update']: with self.argument_context('sig gallery-application version {}'.format(scope)) as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') c.argument('install_command', help='The path and arguments to install the gallery application.') c.argument('remove_command', help='The path and arguments to remove the gallery application.') c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' ' then update operation will invoke remove command on the previous version ' 'and install command on the current version of the gallery application.') c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' 'going to be replicated to. This property is updatable. Expected value: ' 'json-string/json-file/@json-file.') c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' 'deployed from the latest version of the Image Definition won\'t use this Image Version.', arg_group='Publishing Profile') c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' 'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile') # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help=""The name of the proximity placement group."") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help=""The type of the proximity placement group. 
Allowed values: Standard."") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help=""The name or ID of the proximity placement group the {} should be associated with."".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help=""Query to execute over Log Analytics data."") c.argument('timespan', help=""Timespan over which to query. Defaults to querying all available data."") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. 
EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion # region Capacity with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called ""CapacityReservationSupported"" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') # endRegion # region Restore point with self.argument_context('restore-point') as c: c.argument('restore_point_collection_name', options_list=['--collection-name'], help='The name of the restore point collection.') with self.argument_context('restore-point create') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' 'customer wishes to exclude from the restore point. If no disks are specified, all disks will be ' 'included.') c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') with self.argument_context('restore-point show') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.') with self.argument_context('restore-point delete') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') with self.argument_context('restore-point wait') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') # endRegion # region Restore point collection with self.argument_context('restore-point collection create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', arg_group='Source') with self.argument_context('restore-point collection update') as c: c.argument('tags', tags_type) with self.argument_context('restore-point collection show') as c: c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.') " 24981,"def get_functional_test_files_from_directory(input_dir: Path) -> List[Tuple[str, Path]]: """"""Get all functional tests in the input_dir. This also checks the formatting of related.rst files. 
"""""" suite: List[Tuple[str, Path]] = [] for subdirectory in input_dir.iterdir(): for message_dir in subdirectory.iterdir(): assert_msg = ( f""{subdirectory}: '{message_dir.name}' is in the wrong "" f""directory: it does not start by '{subdirectory.name}'"" ) assert message_dir.name.startswith(subdirectory.name), assert_msg if (message_dir / ""good.py"").exists(): suite.append( (message_dir.stem, message_dir / ""good.py""), ) if (message_dir / ""bad.py"").exists(): suite.append( (message_dir.stem, message_dir / ""bad.py""), ) if (message_dir / ""related.rst"").exists(): with open(message_dir / ""related.rst"", encoding=""utf-8"") as file: text = file.read() assert text.startswith( ""-"" ), f""{message_dir / 'related.rst'} should be a list using '-'."" return suite ","def get_functional_test_files_from_directory(input_dir: Path) -> List[Tuple[str, Path]]: """"""Get all functional tests in the input_dir. This also checks the formatting of related.rst files. """""" suite: List[Tuple[str, Path]] = [] for subdirectory in input_dir.iterdir(): for message_dir in subdirectory.iterdir(): assert_msg = ( f""{subdirectory}: '{message_dir.name}' is in the wrong "" f""directory: it does not start with '{subdirectory.name}'"" ) assert message_dir.name.startswith(subdirectory.name), assert_msg if (message_dir / ""good.py"").exists(): suite.append( (message_dir.stem, message_dir / ""good.py""), ) if (message_dir / ""bad.py"").exists(): suite.append( (message_dir.stem, message_dir / ""bad.py""), ) if (message_dir / ""related.rst"").exists(): with open(message_dir / ""related.rst"", encoding=""utf-8"") as file: text = file.read() assert text.startswith( ""-"" ), f""{message_dir / 'related.rst'} should be a list using '-'."" return suite " 54517,"def test_plot_parallel_coordinate_unique_hyper_param() -> None: # Test case when one unique value is suggested during the optimization. study_categorical_params = create_study() study_categorical_params.add_trial( create_trial( value=0.0, params={""category_a"": ""preferred"", ""param_b"": 30}, distributions={ ""category_a"": CategoricalDistribution((""preferred"", ""opt"")), ""param_b"": FloatDistribution(1, 1000, log=True), }, ) ) # Both hyperparameters contain unique values. figure = plot_parallel_coordinate(study_categorical_params) axes = figure.get_figure().axes assert len(axes) == 3 + 1 # Default padding is 5% in Matplotlib. default_padding_fraction = 0.05 assert axes[0].get_ylim() == (-default_padding_fraction, default_padding_fraction) assert axes[1].get_ylabel() == ""Objective Value"" # Optuna's parallel coordinate uses 10% padding for color map. assert axes[1].get_ylim() == (-0.1, 0.1) assert len(figure.findobj(LineCollection)) == 1 # Objective values are not sorted by the other parameters, # unlike Plotly's parallel_coordinate. 
line_collections = figure.findobj(LineCollection) objectives = [line[0, 1] for line in line_collections[0].get_segments()] assert objectives == [0.0] assert axes[2].get_ylim() == (-default_padding_fraction, default_padding_fraction) assert [lien.get_text() for lien in axes[2].get_yticklabels()] == [""preferred""] assert [label.get_position()[1] for label in axes[2].get_yticklabels()] == [0] assert axes[3].get_ylim() == ( 30 * (1.0 - default_padding_fraction), 30 * (1.0 + default_padding_fraction), ) expected_labels = [""Objective Value"", ""category_a"", ""param_b""] xtick_labels = axes[0].get_xticklabels() for expected_label, xtick_label in zip(expected_labels, xtick_labels): assert expected_label == xtick_label.get_text() plt.savefig(BytesIO()) study_categorical_params.add_trial( create_trial( value=2.0, params={""category_a"": ""preferred"", ""param_b"": 20}, distributions={ ""category_a"": CategoricalDistribution((""preferred"", ""opt"")), ""param_b"": FloatDistribution(1, 1000, log=True), }, ) ) # Still ""category_a"" contains unique suggested value during the optimization. figure = plot_parallel_coordinate(study_categorical_params) axes = figure.get_figure().axes assert len(axes) == 3 + 1 assert axes[0].get_ylim() == (0.0, 2.0) assert axes[1].get_ylabel() == ""Objective Value"" assert axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 # Objective values are not sorted by the other parameters, # unlike Plotly's parallel_coordinate. line_collections = figure.findobj(LineCollection) objectives = [line[0, 1] for line in line_collections[0].get_segments()] assert objectives == [0.0, 2.0] assert axes[2].get_ylim() == (-default_padding_fraction, default_padding_fraction) assert [lien.get_text() for lien in axes[2].get_yticklabels()] == [""preferred""] assert [label.get_position()[1] for label in axes[2].get_yticklabels()] == [0] assert axes[3].get_ylim() == (20, 30) expected_labels = [""Objective Value"", ""category_a"", ""param_b""] xtick_labels = axes[0].get_xticklabels() for expected_label, xtick_label in zip(expected_labels, xtick_labels): assert expected_label == xtick_label.get_text() plt.savefig(BytesIO()) ","def test_plot_parallel_coordinate_unique_hyper_param() -> None: # Test case when one unique value is suggested during the optimization. study_categorical_params = create_study() study_categorical_params.add_trial( create_trial( value=0.0, params={""category_a"": ""preferred"", ""param_b"": 30}, distributions={ ""category_a"": CategoricalDistribution((""preferred"", ""opt"")), ""param_b"": FloatDistribution(1, 1000, log=True), }, ) ) # Both hyperparameters contain unique values. figure = plot_parallel_coordinate(study_categorical_params) axes = figure.get_figure().axes assert len(axes) == 3 + 1 # Default padding is 5% in Matplotlib. default_padding_fraction = plt.margins()[0] assert axes[0].get_ylim() == (-default_padding_fraction, default_padding_fraction) assert axes[1].get_ylabel() == ""Objective Value"" # Optuna's parallel coordinate uses 10% padding for color map. assert axes[1].get_ylim() == (-0.1, 0.1) assert len(figure.findobj(LineCollection)) == 1 # Objective values are not sorted by the other parameters, # unlike Plotly's parallel_coordinate. 
line_collections = figure.findobj(LineCollection) objectives = [line[0, 1] for line in line_collections[0].get_segments()] assert objectives == [0.0] assert axes[2].get_ylim() == (-default_padding_fraction, default_padding_fraction) assert [lien.get_text() for lien in axes[2].get_yticklabels()] == [""preferred""] assert [label.get_position()[1] for label in axes[2].get_yticklabels()] == [0] assert axes[3].get_ylim() == ( 30 * (1.0 - default_padding_fraction), 30 * (1.0 + default_padding_fraction), ) expected_labels = [""Objective Value"", ""category_a"", ""param_b""] xtick_labels = axes[0].get_xticklabels() for expected_label, xtick_label in zip(expected_labels, xtick_labels): assert expected_label == xtick_label.get_text() plt.savefig(BytesIO()) study_categorical_params.add_trial( create_trial( value=2.0, params={""category_a"": ""preferred"", ""param_b"": 20}, distributions={ ""category_a"": CategoricalDistribution((""preferred"", ""opt"")), ""param_b"": FloatDistribution(1, 1000, log=True), }, ) ) # Still ""category_a"" contains unique suggested value during the optimization. figure = plot_parallel_coordinate(study_categorical_params) axes = figure.get_figure().axes assert len(axes) == 3 + 1 assert axes[0].get_ylim() == (0.0, 2.0) assert axes[1].get_ylabel() == ""Objective Value"" assert axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 # Objective values are not sorted by the other parameters, # unlike Plotly's parallel_coordinate. line_collections = figure.findobj(LineCollection) objectives = [line[0, 1] for line in line_collections[0].get_segments()] assert objectives == [0.0, 2.0] assert axes[2].get_ylim() == (-default_padding_fraction, default_padding_fraction) assert [lien.get_text() for lien in axes[2].get_yticklabels()] == [""preferred""] assert [label.get_position()[1] for label in axes[2].get_yticklabels()] == [0] assert axes[3].get_ylim() == (20, 30) expected_labels = [""Objective Value"", ""category_a"", ""param_b""] xtick_labels = axes[0].get_xticklabels() for expected_label, xtick_label in zip(expected_labels, xtick_labels): assert expected_label == xtick_label.get_text() plt.savefig(BytesIO()) " 7245,"def threshold_phansalkar(image, window_size=15, k=0.25, r=None, p=2.0, q=10.0): """"""Applies Phansalkar local threshold to an array. Phansalkar is a modification of Sauvola technique to deal with low contrast images. This method is using the following formula:: T = m(x,y) * (1 + p * exp( -q * m(x,y) ) + k * ((s(x,y) / R) - 1)) where m(x,y) and s(x,y) are the mean and standard deviation of pixel (x,y) neighborhood defined by a rectangular window with size w times w centered around the pixel. k, p and q are configurable parameters. R is the maximum standard deviation of a greyscale image. Parameters ---------- image : ndarray Input image. window_size : int, or iterable of int, optional Window size specified as a single odd integer (3, 5, 7, …), or an iterable of length ``image.ndim`` containing only odd integers (e.g. ``(1, 5, 5)``). k : float, optional Value of the positive parameter k. r : float, optional Value of R, the dynamic range of standard deviation. If None, set to the half of the image dtype range. p : float, optional Value of the parameter p. q : float, optional Value of the parameter q. Returns ------- threshold : (N, M) ndarray Threshold mask. All pixels with an intensity higher than this value are assumed to be foreground. Notes ----- This algorithm is originally designed for detection of cell nuclei in low contrast images. 
Therefore the historgram has to be equalized beforehand using skimage.exposure.equalize_adapthist(). References ---------- .. [1] Phansalskar N. et al. ""Adaptive local thresholding for detection of nuclei in diversity stained cytology images."", International Conference on Communications and Signal Processing (ICCSP), pp. 218-220, 2011 :DOI:`10.1109/ICCSP.2011.5739305` Examples -------- >>> from skimage import data >>> from skimage.exposure import equalize_adapthist >>> image = data.moon() >>> image_eq = equalize_adapthist(image) >>> t_phansalkar = threshold_phansalkar(image_eq, window_size=15, k=0.25) >>> binary_image = image_eq > t_phansalkar """""" if r is None: imin, imax = dtype_limits(image, clip_negative=False) r = 0.5 * (imax - imin) m, s = _mean_std(image, window_size) return m * (1 + np.power(p, (-q * m)) + k * ((s / r) - 1)) ","def threshold_phansalkar(image, window_size=15, k=0.25, r=None, p=2.0, q=10.0): """"""Applies Phansalkar local threshold to an array. Phansalkar is a modification of Sauvola technique to deal with low contrast images. This method is using the following formula:: T = m(x,y) * (1 + p * exp( -q * m(x,y) ) + k * ((s(x,y) / R) - 1)) where m(x,y) and s(x,y) are the mean and standard deviation of pixel (x,y) neighborhood defined by a rectangular window with size w times w centered around the pixel. k, p and q are configurable parameters. R is the maximum standard deviation of a greyscale image. Parameters ---------- image : ndarray Input image. window_size : int, or iterable of int, optional Window size specified as a single odd integer (3, 5, 7, …), or an iterable of length ``image.ndim`` containing only odd integers (e.g. ``(1, 5, 5)``). k : float, optional Value of the positive parameter k. r : float, optional Value of R, the dynamic range of standard deviation. If None, set to the half of the image dtype range. p : float, optional Value of the parameter p. q : float, optional Value of the parameter q. Returns ------- threshold : (N, M) ndarray Threshold mask. All pixels with an intensity higher than this value are assumed to be foreground. Notes ----- This algorithm is originally designed for detection of cell nuclei in low contrast images. Therefore the historgram has to be equalized beforehand using :func:`skimage.exposure.equalize_adapthist`. References ---------- .. [1] Phansalskar N. et al. ""Adaptive local thresholding for detection of nuclei in diversity stained cytology images."", International Conference on Communications and Signal Processing (ICCSP), pp. 218-220, 2011 :DOI:`10.1109/ICCSP.2011.5739305` Examples -------- >>> from skimage import data >>> from skimage.exposure import equalize_adapthist >>> image = data.moon() >>> image_eq = equalize_adapthist(image) >>> t_phansalkar = threshold_phansalkar(image_eq, window_size=15, k=0.25) >>> binary_image = image_eq > t_phansalkar """""" if r is None: imin, imax = dtype_limits(image, clip_negative=False) r = 0.5 * (imax - imin) m, s = _mean_std(image, window_size) return m * (1 + np.power(p, (-q * m)) + k * ((s / r) - 1)) " 636,"def save_input_file(path, rmg): """""" Save an RMG input file at `path` on disk from the :class:`RMG` object `rmg`. 
"""""" f = open(path, 'w') # Databases f.write('database(\n') # f.write(' ""{0}"",\n'.format(rmg.database_directory)) f.write(' thermoLibraries = {0!r},\n'.format(rmg.thermo_libraries)) f.write(' reactionLibraries = {0!r},\n'.format(rmg.reaction_libraries)) f.write(' seedMechanisms = {0!r},\n'.format(rmg.seed_mechanisms)) f.write(' kineticsDepositories = {0!r},\n'.format(rmg.kinetics_depositories)) f.write(' kineticsFamilies = {0!r},\n'.format(rmg.kinetics_families)) f.write(' kineticsEstimator = {0!r},\n'.format(rmg.kinetics_estimator)) f.write(')\n\n') if rmg.surface_site_density or rmg.binding_energies: f.write('catalystProperties(\n') if rmg.surface_site_density: f.write(' surfaceSiteDensity = {0!r},\n'.format(rmg.surface_site_density)) if rmg.binding_energies: f.write(' bindingEnergies = {\n') for elem, be in rmg.binding_energies.items(): f.write(' {0!r}:{1!r},\n'.format(elem, be)) f.write(' },\n') if rmg.coverage_dependence: f.write(' coverageDependence = {0},\n'.format(rmg.coverage_dependence)) f.write(')\n\n') # Species for spcs in rmg.initial_species: f.write('species(\n') f.write(' label = ""{0}"",\n'.format(spcs.label)) f.write(' reactive = {0},\n'.format(spcs.reactive)) f.write(' structure = adjacencyList(\n') f.write('""""""\n') f.write(spcs.molecule[0].to_adjacency_list()) f.write('""""""),\n') f.write(')\n\n') # Reaction systems for system in rmg.reaction_systems: if rmg.solvent: f.write('liquidReactor(\n') f.write(' temperature = ({0:g},""{1!s}""),\n'.format(system.T.value, system.T.units)) f.write(' initialConcentrations={\n') for spcs, conc in system.initial_concentrations.items(): f.write(' ""{0!s}"": ({1:g},""{2!s}""),\n'.format(spcs.label, conc.value, conc.units)) elif isinstance(system, SurfaceReactor): f.write('surfaceReactor(\n') # determine if it is a ranged reactor if system.T: f.write(' temperature = ({0:g}, {1!s}),\n'.format(system.T.value, system.T.units)) else: Trange_lo = (system.Trange[0].value, system.Trange[0].units) Trange_hi = (system.Trange[1].value, system.Trange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Trange_lo, Trange_hi)) # Convert the pressure from SI pascal units to bar here # Do something more fancy later for converting to user's desired units for both T and P.. if system.P_initial: f.write(' initialPressure = ({0:g}, ""{1!s}""),\n'.format(system.P_initial.value, system.P_initial.units)) else: Prange_lo = (system.Prange[0].value, system.Prange[0].units) Prange_hi = (system.Prange[1].value, system.Prange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Prange_lo, Prange_hi)) f.write(' initialGasMoleFractions={\n') for spcs, molfrac in system.initial_gas_mole_fractions.items(): f.write(' ""{0!s}"": {1:g},\n'.format(spcs.label, molfrac)) f.write(' },\n') f.write(' initialSurfaceCoverages={\n') for spcs, molfrac in system.initial_surface_coverages.items(): f.write(' ""{0!s}"": {1:g},\n'.format(spcs.label, molfrac)) f.write(' },\n') f.write(' surfaceVolumeRatio=({0:g},""{1!s}""),\n'.format(system.surface_volume_ratio.value, system.surface_volume_ratio.units)) else: f.write('simpleReactor(\n') if system.T: f.write(' temperature = ({0:g}, {1!s}),\n'.format(system.T.value, system.T.units)) else: Trange_lo = (system.Trange[0].value, system.Trange[0].units) Trange_hi = (system.Trange[1].value, system.Trange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Trange_lo, Trange_hi)) # Convert the pressure from SI pascal units to bar here # Do something more fancy later for converting to user's desired units for both T and P.. 
if system.P_initial: f.write(' pressure = ({0:g}, {1!s}),\n'.format(system.P_initial.value, system.P_initial.units)) else: Prange_lo = (system.Prange[0].value, system.Prange[0].units) Prange_hi = (system.Prange[1].value, system.Prange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Prange_lo, Prange_hi)) f.write(' initialMoleFractions={\n') for spcs, molfrac in system.initial_gas_mole_fractions.items(): f.write(' ""{0!s}"": {1:g},\n'.format(spcs.label, molfrac)) f.write(' },\n') # Termination criteria conversions = '' for term in system.termination: if isinstance(term, TerminationTime): f.write(' terminationTime = ({0:g},""{1!s}""),\n'.format(term.time.value, term.time.units)) elif isinstance(term,TerminationConversion): conversions += ' ""{0:s}"": {1:g},\n'.format(term.species.label, term.conversion) elif isinstance(term, TerminationRateRatio): f.write(' terminationRateRatio = {0:g},\n'.format(term.ratio)) if conversions: f.write(' terminationConversion = {\n') f.write(conversions) f.write(' },\n') # Sensitivity analysis if system.sensitive_species: sensitivity = [] for item in system.sensitive_species: sensitivity.append(item.label) f.write(' sensitivity = {0},\n'.format(sensitivity)) f.write(' sensitivityThreshold = {0},\n'.format(system.sensitivity_threshold)) f.write(')\n\n') if rmg.solvent: f.write(""solvation(\n solvent = '{0!s}'\n)\n\n"".format(rmg.solvent)) # Simulator tolerances f.write('simulator(\n') f.write(' atol = {0:g},\n'.format(rmg.simulator_settings_list[0].atol)) f.write(' rtol = {0:g},\n'.format(rmg.simulator_settings_list[0].rtol)) f.write(' sens_atol = {0:g},\n'.format(rmg.simulator_settings_list[0].sens_atol)) f.write(' sens_rtol = {0:g},\n'.format(rmg.simulator_settings_list[0].sens_rtol)) f.write(')\n\n') # Model f.write('model(\n') f.write(' toleranceMoveToCore = {0:g},\n'.format(rmg.model_settings_list[0].tol_move_to_core)) f.write(' toleranceKeepInEdge = {0:g},\n'.format(rmg.model_settings_list[0].tol_keep_in_edge)) f.write(' toleranceInterruptSimulation = {0:g},\n'.format(rmg.model_settings_list[0].tol_interrupt_simulation)) f.write(' maximumEdgeSpecies = {0:d},\n'.format(rmg.model_settings_list[0].maximum_edge_species)) f.write(' minCoreSizeForPrune = {0:d},\n'.format(rmg.model_settings_list[0].min_core_size_for_prune)) f.write(' minSpeciesExistIterationsForPrune = {0:d},\n'.format(rmg.model_settings_list[0].min_species_exist_iterations_for_prune)) f.write(' filterReactions = {0:d},\n'.format(rmg.model_settings_list[0].filter_reactions)) f.write(' filterThreshold = {0:g},\n'.format(rmg.model_settings_list[0].filter_threshold)) f.write(')\n\n') # Pressure Dependence if rmg.pressure_dependence: f.write('pressureDependence(\n') f.write(' method = {0!r},\n'.format(rmg.pressure_dependence.method)) f.write(' maximumGrainSize = ({0:g},""{1!s}""),\n'.format(rmg.pressure_dependence.grain_size.value, rmg.pressure_dependence.grain_size.units)) f.write(' minimumNumberOfGrains = {0},\n'.format(rmg.pressure_dependence.grain_count)) f.write(' temperatures = ({0:g},{1:g},""{2!s}"",{3:d}),\n'.format( rmg.pressure_dependence.Tmin.value, rmg.pressure_dependence.Tmax.value, rmg.pressure_dependence.Tmax.units, rmg.pressure_dependence.Tcount, )) f.write(' pressures = ({0:g},{1:g},""{2!s}"",{3:d}),\n'.format( rmg.pressure_dependence.Pmin.value, rmg.pressure_dependence.Pmax.value, rmg.pressure_dependence.Pmax.units, rmg.pressure_dependence.Pcount, )) f.write(' interpolation = {0},\n'.format(rmg.pressure_dependence.interpolation_model)) f.write(' maximumAtoms = {0}, 
\n'.format(rmg.pressure_dependence.maximum_atoms)) f.write(')\n\n') # Quantum Mechanics if rmg.quantum_mechanics: f.write('quantumMechanics(\n') f.write(' software = {0!r},\n'.format(rmg.quantum_mechanics.settings.software)) f.write(' method = {0!r},\n'.format(rmg.quantum_mechanics.settings.method)) # Split paths created by QMSettings if rmg.quantum_mechanics.settings.fileStore: f.write(' fileStore = {0!r},\n'.format(os.path.split(rmg.quantum_mechanics.settings.fileStore)[0])) else: f.write(' fileStore = None,\n') if rmg.quantum_mechanics.settings.scratchDirectory: f.write(' scratchDirectory = {0!r},\n'.format( os.path.split(rmg.quantum_mechanics.settings.scratchDirectory)[0])) else: f.write(' scratchDirectory = None,\n') f.write(' onlyCyclics = {0},\n'.format(rmg.quantum_mechanics.settings.onlyCyclics)) f.write(' maxRadicalNumber = {0},\n'.format(rmg.quantum_mechanics.settings.maxRadicalNumber)) f.write(')\n\n') # Species Constraints if rmg.species_constraints: f.write('generatedSpeciesConstraints(\n') for constraint, value in sorted(list(rmg.species_constraints.items()), key=lambda constraint: constraint[0]): if value is not None and constraint is not ""explicitlyAllowedMolecules"": f.write(' {0} = {1},\n'.format(constraint, value)) f.write(')\n\n') # Options f.write('options(\n') f.write(' units = ""{0}"",\n'.format(rmg.units)) f.write(' generateOutputHTML = {0},\n'.format(rmg.generate_output_html)) f.write(' generatePlots = {0},\n'.format(rmg.generate_plots)) f.write(' saveSimulationProfiles = {0},\n'.format(rmg.save_simulation_profiles)) f.write(' saveEdgeSpecies = {0},\n'.format(rmg.save_edge_species)) f.write(' keepIrreversible = {0},\n'.format(rmg.keep_irreversible)) f.write(' trimolecularProductReversible = {0},\n'.format(rmg.trimolecular_product_reversible)) f.write(' verboseComments = {0},\n'.format(rmg.verbose_comments)) f.write(' wallTime = ""{0!s}"",\n'.format(rmg.walltime)) f.write(')\n\n') if rmg.forbidden_structures: for struct in rmg.forbidden_structures: f.write('forbidden(\n') f.write(' label=""{0!s}"",\n'.format(struct.label)) f.write(' structure=adjacencyList(\n') f.write('""""""\n') f.write(struct.item.to_adjacency_list()) f.write('""""""),\n') f.write(')\n\n') f.close() ","def save_input_file(path, rmg): """""" Save an RMG input file at `path` on disk from the :class:`RMG` object `rmg`. 
"""""" f = open(path, 'w') # Databases f.write('database(\n') # f.write(' ""{0}"",\n'.format(rmg.database_directory)) f.write(' thermoLibraries = {0!r},\n'.format(rmg.thermo_libraries)) f.write(' reactionLibraries = {0!r},\n'.format(rmg.reaction_libraries)) f.write(' seedMechanisms = {0!r},\n'.format(rmg.seed_mechanisms)) f.write(' kineticsDepositories = {0!r},\n'.format(rmg.kinetics_depositories)) f.write(' kineticsFamilies = {0!r},\n'.format(rmg.kinetics_families)) f.write(' kineticsEstimator = {0!r},\n'.format(rmg.kinetics_estimator)) f.write(')\n\n') if rmg.surface_site_density or rmg.binding_energies: f.write('catalystProperties(\n') if rmg.surface_site_density: f.write(' surfaceSiteDensity = {0!r},\n'.format(rmg.surface_site_density)) if rmg.binding_energies: f.write(' bindingEnergies = {\n') for elem, be in rmg.binding_energies.items(): f.write(' {0!r}:{1!r},\n'.format(elem, be)) f.write(' },\n') if rmg.coverage_dependence: f.write(' coverageDependence = {0},\n'.format(rmg.coverage_dependence)) f.write(')\n\n') # Species for spcs in rmg.initial_species: f.write('species(\n') f.write(' label = ""{0}"",\n'.format(spcs.label)) f.write(' reactive = {0},\n'.format(spcs.reactive)) f.write(' structure = adjacencyList(\n') f.write('""""""\n') f.write(spcs.molecule[0].to_adjacency_list()) f.write('""""""),\n') f.write(')\n\n') # Reaction systems for system in rmg.reaction_systems: if rmg.solvent: f.write('liquidReactor(\n') f.write(' temperature = ({0:g},""{1!s}""),\n'.format(system.T.value, system.T.units)) f.write(' initialConcentrations={\n') for spcs, conc in system.initial_concentrations.items(): f.write(' ""{0!s}"": ({1:g},""{2!s}""),\n'.format(spcs.label, conc.value, conc.units)) elif isinstance(system, SurfaceReactor): f.write('surfaceReactor(\n') # determine if it is a ranged reactor if system.T: f.write(' temperature = ({0:g}, {1!s}),\n'.format(system.T.value, system.T.units)) else: Trange_lo = (system.Trange[0].value, system.Trange[0].units) Trange_hi = (system.Trange[1].value, system.Trange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Trange_lo, Trange_hi)) # Convert the pressure from SI pascal units to bar here # Do something more fancy later for converting to user's desired units for both T and P.. if system.P_initial: f.write(' initialPressure = ({0:g}, ""{1!s}""),\n'.format(system.P_initial.value, system.P_initial.units)) else: Prange_lo = (system.Prange[0].value, system.Prange[0].units) Prange_hi = (system.Prange[1].value, system.Prange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Prange_lo, Prange_hi)) f.write(' initialGasMoleFractions={\n') for spcs, molfrac in system.initial_gas_mole_fractions.items(): f.write(' ""{0!s}"": {1:g},\n'.format(spcs.label, molfrac)) f.write(' },\n') f.write(' initialSurfaceCoverages={\n') for spcs, molfrac in system.initial_surface_coverages.items(): f.write(' ""{0!s}"": {1:g},\n'.format(spcs.label, molfrac)) f.write(' },\n') f.write(' surfaceVolumeRatio=({0:g},""{1!s}""),\n'.format(system.surface_volume_ratio.value, system.surface_volume_ratio.units)) else: f.write('simpleReactor(\n') if system.T: f.write(' temperature = ({0:g}, {1!s}),\n'.format(system.T.value, system.T.units)) else: Trange_lo = (system.Trange[0].value, system.Trange[0].units) Trange_hi = (system.Trange[1].value, system.Trange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Trange_lo, Trange_hi)) # Convert the pressure from SI pascal units to bar here # Do something more fancy later for converting to user's desired units for both T and P.. 
if system.P_initial: f.write(' pressure = ({0:g}, {1!s}),\n'.format(system.P_initial.value, system.P_initial.units)) else: Prange_lo = (system.Prange[0].value, system.Prange[0].units) Prange_hi = (system.Prange[1].value, system.Prange[1].units) f.write(' temperature = [{0}, {1}],\n'.format(Prange_lo, Prange_hi)) f.write(' initialMoleFractions={\n') for spcs, molfrac in system.initial_gas_mole_fractions.items(): f.write(' ""{0!s}"": {1:g},\n'.format(spcs.label, molfrac)) f.write(' },\n') # Termination criteria conversions = '' for term in system.termination: if isinstance(term, TerminationTime): f.write(' terminationTime = ({0:g},""{1!s}""),\n'.format(term.time.value, term.time.units)) elif isinstance(term, TerminationConversion): conversions += ' ""{0:s}"": {1:g},\n'.format(term.species.label, term.conversion) elif isinstance(term, TerminationRateRatio): f.write(' terminationRateRatio = {0:g},\n'.format(term.ratio)) if conversions: f.write(' terminationConversion = {\n') f.write(conversions) f.write(' },\n') # Sensitivity analysis if system.sensitive_species: sensitivity = [] for item in system.sensitive_species: sensitivity.append(item.label) f.write(' sensitivity = {0},\n'.format(sensitivity)) f.write(' sensitivityThreshold = {0},\n'.format(system.sensitivity_threshold)) f.write(')\n\n') if rmg.solvent: f.write(""solvation(\n solvent = '{0!s}'\n)\n\n"".format(rmg.solvent)) # Simulator tolerances f.write('simulator(\n') f.write(' atol = {0:g},\n'.format(rmg.simulator_settings_list[0].atol)) f.write(' rtol = {0:g},\n'.format(rmg.simulator_settings_list[0].rtol)) f.write(' sens_atol = {0:g},\n'.format(rmg.simulator_settings_list[0].sens_atol)) f.write(' sens_rtol = {0:g},\n'.format(rmg.simulator_settings_list[0].sens_rtol)) f.write(')\n\n') # Model f.write('model(\n') f.write(' toleranceMoveToCore = {0:g},\n'.format(rmg.model_settings_list[0].tol_move_to_core)) f.write(' toleranceKeepInEdge = {0:g},\n'.format(rmg.model_settings_list[0].tol_keep_in_edge)) f.write(' toleranceInterruptSimulation = {0:g},\n'.format(rmg.model_settings_list[0].tol_interrupt_simulation)) f.write(' maximumEdgeSpecies = {0:d},\n'.format(rmg.model_settings_list[0].maximum_edge_species)) f.write(' minCoreSizeForPrune = {0:d},\n'.format(rmg.model_settings_list[0].min_core_size_for_prune)) f.write(' minSpeciesExistIterationsForPrune = {0:d},\n'.format(rmg.model_settings_list[0].min_species_exist_iterations_for_prune)) f.write(' filterReactions = {0:d},\n'.format(rmg.model_settings_list[0].filter_reactions)) f.write(' filterThreshold = {0:g},\n'.format(rmg.model_settings_list[0].filter_threshold)) f.write(')\n\n') # Pressure Dependence if rmg.pressure_dependence: f.write('pressureDependence(\n') f.write(' method = {0!r},\n'.format(rmg.pressure_dependence.method)) f.write(' maximumGrainSize = ({0:g},""{1!s}""),\n'.format(rmg.pressure_dependence.grain_size.value, rmg.pressure_dependence.grain_size.units)) f.write(' minimumNumberOfGrains = {0},\n'.format(rmg.pressure_dependence.grain_count)) f.write(' temperatures = ({0:g},{1:g},""{2!s}"",{3:d}),\n'.format( rmg.pressure_dependence.Tmin.value, rmg.pressure_dependence.Tmax.value, rmg.pressure_dependence.Tmax.units, rmg.pressure_dependence.Tcount, )) f.write(' pressures = ({0:g},{1:g},""{2!s}"",{3:d}),\n'.format( rmg.pressure_dependence.Pmin.value, rmg.pressure_dependence.Pmax.value, rmg.pressure_dependence.Pmax.units, rmg.pressure_dependence.Pcount, )) f.write(' interpolation = {0},\n'.format(rmg.pressure_dependence.interpolation_model)) f.write(' maximumAtoms = {0}, 
\n'.format(rmg.pressure_dependence.maximum_atoms)) f.write(')\n\n') # Quantum Mechanics if rmg.quantum_mechanics: f.write('quantumMechanics(\n') f.write(' software = {0!r},\n'.format(rmg.quantum_mechanics.settings.software)) f.write(' method = {0!r},\n'.format(rmg.quantum_mechanics.settings.method)) # Split paths created by QMSettings if rmg.quantum_mechanics.settings.fileStore: f.write(' fileStore = {0!r},\n'.format(os.path.split(rmg.quantum_mechanics.settings.fileStore)[0])) else: f.write(' fileStore = None,\n') if rmg.quantum_mechanics.settings.scratchDirectory: f.write(' scratchDirectory = {0!r},\n'.format( os.path.split(rmg.quantum_mechanics.settings.scratchDirectory)[0])) else: f.write(' scratchDirectory = None,\n') f.write(' onlyCyclics = {0},\n'.format(rmg.quantum_mechanics.settings.onlyCyclics)) f.write(' maxRadicalNumber = {0},\n'.format(rmg.quantum_mechanics.settings.maxRadicalNumber)) f.write(')\n\n') # Species Constraints if rmg.species_constraints: f.write('generatedSpeciesConstraints(\n') for constraint, value in sorted(list(rmg.species_constraints.items()), key=lambda constraint: constraint[0]): if value is not None and constraint is not ""explicitlyAllowedMolecules"": f.write(' {0} = {1},\n'.format(constraint, value)) f.write(')\n\n') # Options f.write('options(\n') f.write(' units = ""{0}"",\n'.format(rmg.units)) f.write(' generateOutputHTML = {0},\n'.format(rmg.generate_output_html)) f.write(' generatePlots = {0},\n'.format(rmg.generate_plots)) f.write(' saveSimulationProfiles = {0},\n'.format(rmg.save_simulation_profiles)) f.write(' saveEdgeSpecies = {0},\n'.format(rmg.save_edge_species)) f.write(' keepIrreversible = {0},\n'.format(rmg.keep_irreversible)) f.write(' trimolecularProductReversible = {0},\n'.format(rmg.trimolecular_product_reversible)) f.write(' verboseComments = {0},\n'.format(rmg.verbose_comments)) f.write(' wallTime = ""{0!s}"",\n'.format(rmg.walltime)) f.write(')\n\n') if rmg.forbidden_structures: for struct in rmg.forbidden_structures: f.write('forbidden(\n') f.write(' label=""{0!s}"",\n'.format(struct.label)) f.write(' structure=adjacencyList(\n') f.write('""""""\n') f.write(struct.item.to_adjacency_list()) f.write('""""""),\n') f.write(')\n\n') f.close() " 30964,"def main(): value = demisto.args()['left'] if type(value) is list: value = demisto.args()['left'][0] relative_date = demisto.args()['right'] return_results(check_date(value, relative_date)) ","def main(): value = demisto.args()['left'] if type(value) is list: value = demisto.args()['left'][0] relative_date = demisto.args().get('right') return_results(check_date(value, relative_date)) " 4151,"def p_mapping_pattern(s): pos = s.position() s.expect('{') if s.sy == '}': s.next() return MatchCaseNodes.MatchMappingPatternNode(pos) double_star_capture_target = None items_patterns = [] double_star_set_twice = None while True: if s.sy == '**': if double_star_capture_target: double_star_set_twice = s.position() s.next() double_star_capture_target = p_pattern_capture_target(s) else: # key=(literal_expr | attr) with tentatively_scan(s) as errors: pattern = p_literal_pattern(s) key = pattern.value if errors: pattern = p_value_pattern(s) key = pattern.value s.expect(':') value = p_pattern(s) items_patterns.append((key, value)) if s.sy==',': s.next() else: break if s.sy=='}': break if s.sy != '}': s.error(""Expected '}'"") s.next() if double_star_set_twice is not None: return Nodes.ErrorNode(double_star_set_twice, what = ""Double star capture set twice"") return MatchCaseNodes.MatchMappingPatternNode( pos, 
keys = [kv[0] for kv in items_patterns], value_patterns = [kv[1] for kv in items_patterns], double_star_capture_target = double_star_capture_target ) ","def p_mapping_pattern(s): pos = s.position() s.expect('{') if s.sy == '}': s.next() return MatchCaseNodes.MatchMappingPatternNode(pos) double_star_capture_target = None items_patterns = [] double_star_set_twice = None while True: if s.sy == '**': if double_star_capture_target: double_star_set_twice = s.position() s.next() double_star_capture_target = p_pattern_capture_target(s) else: # key=(literal_expr | attr) with tentatively_scan(s) as errors: pattern = p_literal_pattern(s) key = pattern.value if errors: pattern = p_value_pattern(s) key = pattern.value s.expect(':') value = p_pattern(s) items_patterns.append((key, value)) if s.sy==',': s.next() else: break if s.sy=='}': break s.expect('}') if double_star_set_twice is not None: return Nodes.ErrorNode(double_star_set_twice, what = ""Double star capture set twice"") return MatchCaseNodes.MatchMappingPatternNode( pos, keys = [kv[0] for kv in items_patterns], value_patterns = [kv[1] for kv in items_patterns], double_star_capture_target = double_star_capture_target ) " 5414,"def test_reinit_crypto(): # make sure reinit cryptot does not crash salt.utils.crypt.reinit_crypto() # make sure reinit does not crash when no crypt is found with patch(""salt.utils.crypt.HAS_CRYPTO"", None): with patch(""salt.utils.crypt.Random"", None): salt.utils.crypt.reinit_crypto() ","def test_reinit_crypto(): # make sure reinit crypto does not crash salt.utils.crypt.reinit_crypto() # make sure reinit does not crash when no crypt is found with patch(""salt.utils.crypt.HAS_CRYPTO"", None): with patch(""salt.utils.crypt.Random"", None): salt.utils.crypt.reinit_crypto() " 8081,"def get_body_heliographic_stonyhurst(body, time='now', observer=None): """""" Return a `~sunpy.coordinates.frames.HeliographicStonyhurst` frame for the location of a solar-system body at a specified time. Parameters ---------- body : `str` The solar-system body for which to calculate positions time : various Time to use as `~astropy.time.Time` or in a parse_time-compatible format observer : `~astropy.coordinates.SkyCoord` If not None, the returned coordinate is the apparent location (i.e., factors in light travel time) Returns ------- out : `~sunpy.coordinates.frames.HeliographicStonyhurst` Location of the solar-system body in the `~sunpy.coordinates.HeliographicStonyhurst` frame """""" obstime = parse_time(time) if observer is None: body_icrs = get_body_barycentric(body, obstime) else: observer_icrs = SkyCoord(observer).icrs.cartesian # This implementation is modeled after Astropy's `_get_apparent_body_position` light_travel_time = 0.*u.s emitted_time = obstime delta_light_travel_time = 1.*u.s # placeholder value while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s): body_icrs = get_body_barycentric(body, emitted_time) distance = (body_icrs - observer_icrs).norm() delta_light_travel_time = light_travel_time - distance / speed_of_light light_travel_time = distance / speed_of_light emitted_time = obstime - light_travel_time print('Apparent location factors in {} seconds of light travel time'.format(light_travel_time.to('s').value)) body_hgs = ICRS(body_icrs).transform_to(HGS(obstime=obstime)) return body_hgs ","def get_body_heliographic_stonyhurst(body, time='now', observer=None): """""" Return a `~sunpy.coordinates.frames.HeliographicStonyhurst` frame for the location of a solar-system body at a specified time. 
Parameters ---------- body : `str` The solar-system body for which to calculate positions time : various Time to use as `~astropy.time.Time` or in a parse_time-compatible format observer : `~astropy.coordinates.SkyCoord` If not None, the returned coordinate is the apparent location (i.e., factors in light travel time) Returns ------- out : `~sunpy.coordinates.frames.HeliographicStonyhurst` Location of the solar-system body in the `~sunpy.coordinates.HeliographicStonyhurst` frame """""" obstime = parse_time(time) if observer is None: body_icrs = get_body_barycentric(body, obstime) else: observer_icrs = SkyCoord(observer).icrs.cartesian # This implementation is modeled after Astropy's `_get_apparent_body_position` light_travel_time = 0.*u.s emitted_time = obstime delta_light_travel_time = 1.*u.s # placeholder value while np.any(np.fabs(delta_light_travel_time) > 1.0e-8 * u.s): body_icrs = get_body_barycentric(body, emitted_time) distance = (body_icrs - observer_icrs).norm() delta_light_travel_time = light_travel_time - distance / speed_of_light light_travel_time = distance / speed_of_light emitted_time = obstime - light_travel_time print('Apparent location factors in {} seconds of light travel time'.format(light_travel_time.to('s').value)) body_hgs = ICRS(body_icrs).transform_to(HGS(obstime=obstime)) return body_hgs " 23678,"def calc_surface_orientation(tracker_theta, axis_tilt=0, axis_azimuth=0): """""" Calculate the surface tilt and azimuth angles for a given tracker rotation. Parameters ---------- tracker_theta : numeric Tracker rotation angle [degrees] axis_tilt : float, default 0 The tilt of the axis of rotation with respect to horizontal [degrees] axis_azimuth : float, default 0 A value denoting the compass direction along which the axis of rotation lies. Measured east of north. [degrees] Returns ------- dict or DataFrame Contains keys ``'surface_tilt'`` and ``'surface_azimuth'`` representing the module orientation accounting for tracker rotation and axis orientation. [degrees] References ---------- .. [1] William F Marion and Aron P Dobos, ""Rotation Angle for the Optimum Tracking of One-Axis Trackers"", Technical Report NREL/TP-6A20-58891, July 2013. :doi:`10.2172/1089596` """""" with np.errstate(invalid='ignore', divide='ignore'): surface_tilt = acosd(cosd(tracker_theta) * cosd(axis_tilt)) # clip(..., -1, +1) to prevent arcsin(1 + epsilon) issues: azimuth_delta = asind(np.clip(sind(tracker_theta) / sind(surface_tilt), a_min=-1, a_max=1)) # Combine Eqs 2, 3, and 4: azimuth_delta = np.where(abs(tracker_theta) < 90, azimuth_delta, -azimuth_delta + np.sign(tracker_theta) * 180) # handle surface_tilt=0 case: azimuth_delta = np.where(sind(surface_tilt) != 0, azimuth_delta, 90) surface_azimuth = (axis_azimuth + azimuth_delta) % 360 out = { 'surface_tilt': surface_tilt, 'surface_azimuth': surface_azimuth, } if hasattr(tracker_theta, 'index'): out = pd.DataFrame(out) return out ","def calc_surface_orientation(tracker_theta, axis_tilt=0, axis_azimuth=0): """""" Calculate the surface tilt and azimuth angles for a given tracker rotation. Parameters ---------- tracker_theta : numeric Tracker rotation angle [degrees] axis_tilt : float, default 0 The tilt of the axis of rotation with respect to horizontal. [degree] axis_azimuth : float, default 0 A value denoting the compass direction along which the axis of rotation lies. Measured east of north. 
[degrees] Returns ------- dict or DataFrame Contains keys ``'surface_tilt'`` and ``'surface_azimuth'`` representing the module orientation accounting for tracker rotation and axis orientation. [degrees] References ---------- .. [1] William F Marion and Aron P Dobos, ""Rotation Angle for the Optimum Tracking of One-Axis Trackers"", Technical Report NREL/TP-6A20-58891, July 2013. :doi:`10.2172/1089596` """""" with np.errstate(invalid='ignore', divide='ignore'): surface_tilt = acosd(cosd(tracker_theta) * cosd(axis_tilt)) # clip(..., -1, +1) to prevent arcsin(1 + epsilon) issues: azimuth_delta = asind(np.clip(sind(tracker_theta) / sind(surface_tilt), a_min=-1, a_max=1)) # Combine Eqs 2, 3, and 4: azimuth_delta = np.where(abs(tracker_theta) < 90, azimuth_delta, -azimuth_delta + np.sign(tracker_theta) * 180) # handle surface_tilt=0 case: azimuth_delta = np.where(sind(surface_tilt) != 0, azimuth_delta, 90) surface_azimuth = (axis_azimuth + azimuth_delta) % 360 out = { 'surface_tilt': surface_tilt, 'surface_azimuth': surface_azimuth, } if hasattr(tracker_theta, 'index'): out = pd.DataFrame(out) return out " 30865,"def set_marketplace_bucket(server: Server, build_number, branch_name): marketplace_url_configuration = { 'marketplace.bootstrap.bypass.url': 'https://storage.googleapis.com/marketplace-ci-build/content/builds/{}/{}'.format(branch_name, build_number) } server.add_server_configuration(marketplace_url_configuration, 'failed to config marketplace url ', True) ","def get_modified_packs_ids(build: Build): marketplace_url_configuration = { 'marketplace.bootstrap.bypass.url': 'https://storage.googleapis.com/marketplace-ci-build/content/builds/{}/{}'.format(branch_name, build_number) } server.add_server_configuration(marketplace_url_configuration, 'failed to config marketplace url ', True) " 13588,"def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0): """""" randomisierte QB-Zerlegung See Algorithm 3.1 in [EMKB19]_. Parameters ---------- A : The |VectorArray| for which the randomized QB Decomposition is to be computed. target_rank : int The desired rank for the decomposition. If None rank = len(A). distribution : str Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`) oversampling : int Oversamplingparameter. Number of extra columns of the projectionmatrix. powerIterations : int Number of power Iterations. Returns ------- Q : |VectorArray| containig an approximate optimal Basis for the Image of the Inputmatrix A. len(Q) = target_rank B : Numpy Array. Projection of the Input Matrix into the lower dimensional subspace. """""" assert isinstance(A, VectorArray) assert target_rank is None or target_rank <= len(A) assert distribution in ('normal', 'uniform') if A.dim == 0 or len(A) == 0: return A.space.zeros(), np.zeros((target_rank, len(A))) rank = len(A) if target_rank is None else target_rank + oversampling target_rank = len(A) if target_rank is None else target_rank Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A)) Y = A.lincomb(Omega)[:target_rank] # Power Iterations if(powerIterations > 0): for i in range(powerIterations): Q = gram_schmidt(Y)[:target_rank] Z, _ = spla.qr(A.inner(Q)) Y = A.lincomb(Z)[:target_rank] Q = gram_schmidt(Y)[:target_rank] B = Q.inner(A) return Q, B ","def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0): """""" randomisierte QB-Zerlegung See Algorithm 3.1 in [EMKB19]_. 
Parameters ---------- A : The |VectorArray| for which the randomized QB decomposition is to be computed. target_rank : int The desired rank for the decomposition. If None rank = len(A). distribution : str Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`) oversampling : int Oversamplingparameter. Number of extra columns of the projectionmatrix. powerIterations : int Number of power Iterations. Returns ------- Q : |VectorArray| containig an approximate optimal Basis for the Image of the Inputmatrix A. len(Q) = target_rank B : Numpy Array. Projection of the Input Matrix into the lower dimensional subspace. """""" assert isinstance(A, VectorArray) assert target_rank is None or target_rank <= len(A) assert distribution in ('normal', 'uniform') if A.dim == 0 or len(A) == 0: return A.space.zeros(), np.zeros((target_rank, len(A))) rank = len(A) if target_rank is None else target_rank + oversampling target_rank = len(A) if target_rank is None else target_rank Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A)) Y = A.lincomb(Omega)[:target_rank] # Power Iterations if(powerIterations > 0): for i in range(powerIterations): Q = gram_schmidt(Y)[:target_rank] Z, _ = spla.qr(A.inner(Q)) Y = A.lincomb(Z)[:target_rank] Q = gram_schmidt(Y)[:target_rank] B = Q.inner(A) return Q, B " 53565,"def add(x, y): return x + y ","def square(x): return x * x square(x=4) " 3040,"def _align_method_FRAME(left, right, axis, flex=False, level=None): """""" Convert rhs to meet lhs dims if input is list, tuple or np.ndarray. Parameters ---------- left : DataFrame right : Any axis: int, str, or None flex: bool or None, default False Whether this is a flex op, in which case we reindex. None indices not to check for alignment. 
level : int or level name, default None Returns ------- left : DataFrame right : Any """""" def to_series(right): msg = ""Unable to coerce to Series, length must be {req_len}: given {given_len}"" if axis is not None and left._get_axis_name(axis) == ""index"": if len(left.index) != len(right): raise ValueError( msg.format(req_len=len(left.index), given_len=len(right)) ) right = left._constructor_sliced(right, index=left.index) else: if len(left.columns) != len(right): raise ValueError( msg.format(req_len=len(left.columns), given_len=len(right)) ) right = left._constructor_sliced(right, index=left.columns) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: # Broadcast along rows right = to_series(right[0, :]) else: raise ValueError( ""Unable to coerce to DataFrame, shape "" f""must be {left.shape}: given {right.shape}"" ) elif right.ndim > 2: raise ValueError( f""Unable to coerce to Series/DataFrame, dim must be <= 2: {right.shape}"" ) elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)): # GH17901 right = to_series(right) if flex is not None and isinstance(right, ABCDataFrame): if not left._indexed_same(right): if flex: left, right = left.align(right, join=""outer"", level=level, copy=False) else: raise ValueError( ""Can only compare identically-labeled DataFrame objects"" ) elif isinstance(right, ABCSeries): # axis=1 is default for DataFrame-with-Series op axis = left._get_axis_number(axis) if axis is not None else 1 left, right = left.align( right, join=""outer"", axis=axis, level=level, copy=False ) return left, right ","def _align_method_FRAME(left, right, axis, flex: Optional[bool] = False, level: Optional[Label] = None): """""" Convert rhs to meet lhs dims if input is list, tuple or np.ndarray. Parameters ---------- left : DataFrame right : Any axis: int, str, or None flex: bool or None, default False Whether this is a flex op, in which case we reindex. None indices not to check for alignment. 
level : int or level name, default None Returns ------- left : DataFrame right : Any """""" def to_series(right): msg = ""Unable to coerce to Series, length must be {req_len}: given {given_len}"" if axis is not None and left._get_axis_name(axis) == ""index"": if len(left.index) != len(right): raise ValueError( msg.format(req_len=len(left.index), given_len=len(right)) ) right = left._constructor_sliced(right, index=left.index) else: if len(left.columns) != len(right): raise ValueError( msg.format(req_len=len(left.columns), given_len=len(right)) ) right = left._constructor_sliced(right, index=left.columns) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: # Broadcast along rows right = to_series(right[0, :]) else: raise ValueError( ""Unable to coerce to DataFrame, shape "" f""must be {left.shape}: given {right.shape}"" ) elif right.ndim > 2: raise ValueError( f""Unable to coerce to Series/DataFrame, dim must be <= 2: {right.shape}"" ) elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)): # GH17901 right = to_series(right) if flex is not None and isinstance(right, ABCDataFrame): if not left._indexed_same(right): if flex: left, right = left.align(right, join=""outer"", level=level, copy=False) else: raise ValueError( ""Can only compare identically-labeled DataFrame objects"" ) elif isinstance(right, ABCSeries): # axis=1 is default for DataFrame-with-Series op axis = left._get_axis_number(axis) if axis is not None else 1 left, right = left.align( right, join=""outer"", axis=axis, level=level, copy=False ) return left, right " 32389,"def main(): try: files = demisto.getArg('files') branch_name = demisto.getArg('branch') pack_name = demisto.getArg('pack') user = demisto.getArg('user') comment = demisto.getArg('comment') if not comment: comment = '' username = user.get('username') if user.get('email'): username = f'{username} ({user.get(""email"")})' # commit the files from the input for file in files: if file.get('Unzipped'): continue # create ContentFile item content_file = ContentFile(pack_name=pack_name, file=file) if content_file.content_type in ('automation', 'integration'): # split automation file to yml and script files yml_file, script_file = split_yml_file(content_file) commit_content_item(branch_name, yml_file) commit_content_item(branch_name, script_file) else: commit_content_item(branch_name, content_file) inciden_url = demisto.demistoUrls().get('investigation') # create the PR text pr_body = PR_TEMPLATE.format(username, pack_name, branch_name, inciden_url, comment) if new_files: pr_body = f'{pr_body}\n\n### New files\n- ' pr_body = pr_body + '\n- '.join(new_files) if modified_files: pr_body = f'{pr_body}\n\n### Modified files\n- ' pr_body = pr_body + '\n- '.join(modified_files) return_results(CommandResults( readable_output=pr_body, outputs_prefix='PR_text', outputs=pr_body )) except Exception as ex: demisto.error(str(ex)) # print the traceback return_error(f'Failed to execute CommitFiles script. 
Error: {str(traceback.format_exc())}') ","def main(): try: files = demisto.getArg('files') branch_name = demisto.getArg('branch') pack_name = demisto.getArg('pack') user = demisto.getArg('user') comment = demisto.getArg('comment') if not comment: comment = '' username = user.get('username') if user.get('email'): username = f'{username} ({user.get(""email"")})' # commit the files from the input for file in files: if file.get('Unzipped'): continue # create ContentFile item content_file = ContentFile(pack_name=pack_name, file=file) if content_file.content_type in ('automation', 'integration'): # split automation file to yml and script files yml_file, script_file = split_yml_file(content_file) commit_content_item(branch_name, yml_file) commit_content_item(branch_name, script_file) else: commit_content_item(branch_name, content_file) inciden_url = demisto.demistoUrls().get('investigation') # create the PR text pr_body = PR_TEMPLATE.format(username, pack_name, branch_name, inciden_url, comment) if new_files: pr_body = f'{pr_body}\n\n### New files\n- ' pr_body = pr_body + '\n- '.join(new_files) if modified_files: pr_body = f'{pr_body}\n\n### Modified files\n- ' pr_body = pr_body + '\n- '.join(modified_files) return_results(CommandResults( readable_output=pr_body, outputs_prefix='PR_text', outputs=pr_body )) except Exception as ex: demisto.error(str(ex)) # print the traceback return_error(f'Failed to execute CommitFiles script. Error: {ex}', error=ex) " 59945,"def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--dont-restage', action='store_false', dest='clean_stagedir', help='Reuse the test stage directory', envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '--report-file', action='store', metavar='FILE', help=""Store JSON run report in FILE"", envvar='RFM_REPORT_FILE', configvar='general/report_file' ) output_options.add_argument( '--report-junit', action='store', metavar='FILE', help=""Store a JUnit report in FILE"", envvar='RFM_REPORT_JUNIT', configvar='general/report_junit' ) output_options.add_argument( '-s', 
'--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='%FT%T', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help=('Skip checks with conflicting names ' '(this option is deprecated and has no effect)'), envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) # Select options select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) select_options.add_argument( '--failed', action='store_true', help=""Select failed test cases (only when '--restore-session' is used)"" ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--maintainer', action='append', dest='maintainers', default=[], metavar='PATTERN', help='Select checks with at least one maintainer matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) # FIXME: The following is the only selection option that has an associated # (undocumented) configuration variable. This is to support pruning of the # partition environments as the runtime is created, similarly to how the # system partitions are treated. 
Currently, this facilitates the # implementation of fixtures, but we should reconsider it: see discussion # in https://github.com/eth-cscs/reframe/issues/2245 select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', configvar='general/valid_env_names', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '-T', '--exclude-tag', action='append', dest='exclude_tags', metavar='PATTERN', default=[], help='Exclude checks whose tag matches PATTERN' ) select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) # Action options action_options.add_argument( '--ci-generate', action='store', metavar='FILE', help=('Generate into FILE a Gitlab CI pipeline ' 'for the selected tests and exit'), ) action_options.add_argument( '--describe', action='store_true', help='Give full details on the selected tests' ) action_options.add_argument( '-L', '--list-detailed', nargs='?', const='T', choices=['C', 'T'], help=('List the selected tests (T) or the concretized test cases (C) ' 'providing more details') ) action_options.add_argument( '-l', '--list', nargs='?', const='T', choices=['C', 'T'], help='List the selected tests (T) or the concretized test cases (C)' ) action_options.add_argument( '--list-tags', action='store_true', help='List the unique tags found in the selected tests and exit' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) # Run options run_options.add_argument( '--disable-hook', action='append', metavar='NAME', dest='hooks', default=[], help='Disable a pipeline hook for this run' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' 
) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '-J', '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) run_options.add_argument( '--maxfail', metavar='NUM', action='store', default=sys.maxsize, help='Exit after first NUM failures' ) run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--restore-session', action='store', nargs='?', const='', metavar='REPORT', help='Restore a testing session from REPORT file' ) run_options.add_argument( '-S', '--setvar', action='append', metavar='[TEST.]VAR=VAL', dest='vars', default=[], help=('Set test variable VAR to VAL in all tests ' 'or optionally in TEST only') ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) # Environment options env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '--module-path', action='append', metavar='PATH', dest='module_paths', default=[], help='(Un)use module path PATH before running any regression check', ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--detect-host-topology', action='store', nargs='?', const='-', help='Detect the local host topology and exit' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--nocolor', 
action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--upgrade-config-file', action='store', metavar='OLD[:NEW]', help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax' ) misc_options.add_argument( '-V', '--version', action='version', version=osext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) misc_options.add_argument( '-q', '--quiet', action='count', default=0, help='Decrease verbosity level of output', ) # Options not associated with command-line arguments argparser.add_argument( dest='git_timeout', envvar='RFM_GIT_TIMEOUT', configvar='general/git_timeout', help=('Timeout in seconds when checking if the url is a ' 'valid repository.') ) argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_ADDRESS', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) argparser.add_argument( dest='httpjson_url', envvar='RFM_HTTPJSON_URL', configvar='logging/handlers_perflog/httpjson_url', help='URL of HTTP server accepting JSON logs' ) argparser.add_argument( dest='ignore_reqnodenotavail', envvar='RFM_IGNORE_REQNODENOTAVAIL', configvar='schedulers/ignore_reqnodenotavail', action='store_true', help='Graylog server address' ) argparser.add_argument( dest='compact_test_names', envvar='RFM_COMPACT_TEST_NAMES', configvar='general/compact_test_names', action='store_true', help='Use a compact test naming scheme' ) argparser.add_argument( dest='dump_pipeline_progress', envvar='RFM_DUMP_PIPELINE_PROGRESS', configvar='general/dump_pipeline_progress', action='store_true', help='Dump progress information for the async execution' ) argparser.add_argument( dest='pipeline_timeout', envvar='RFM_PIPELINE_TIMEOUT', configvar='general/pipeline_timeout', action='store', help='Timeout for advancing the pipeline' ) argparser.add_argument( dest='remote_detect', envvar='RFM_REMOTE_DETECT', configvar='general/remote_detect', action='store_true', help='Detect remote system topology' ) argparser.add_argument( dest='remote_workdir', envvar='RFM_REMOTE_WORKDIR', configvar='general/remote_workdir', action='store', help='Working directory for launching ReFrame remotely' ) argparser.add_argument( dest='resolve_module_conflicts', envvar='RFM_RESOLVE_MODULE_CONFLICTS', configvar='general/resolve_module_conflicts', action='store_true', help='Resolve module conflicts automatically' ) argparser.add_argument( dest='syslog_address', envvar='RFM_SYSLOG_ADDRESS', configvar='logging/handlers_perflog/syslog_address', help='Syslog server address' ) argparser.add_argument( dest='trap_job_errors', envvar='RFM_TRAP_JOB_ERRORS', configvar='general/trap_job_errors', action='store_true', help='Trap job errors in job scripts and fail tests automatically' ) argparser.add_argument( dest='use_login_shell', envvar='RFM_USE_LOGIN_SHELL', configvar='general/use_login_shell', action='store_true', help='Use a login shell for job scripts' ) def restrict_logging(): '''Restrict 
logging to errors only. This is done when specific options are passed, which generate JSON output and we don't want to pollute the output with other logging output. :returns: :obj:`True` if the logging was restricted, :obj:`False` otherwise. ''' if (options.show_config or options.detect_host_topology or options.describe): logging.getlogger().setLevel(logging.ERROR) return True else: return False # Parse command line options = argparser.parse_args() if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') if not restrict_logging(): printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' 'please use RFM_GRAYLOG_ADDRESS instead' ) os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER') if options.upgrade_config_file is not None: old_config, *new_config = options.upgrade_config_file.split( ':', maxsplit=1 ) new_config = new_config[0] if new_config else None try: new_config = config.convert_old_config(old_config, new_config) except Exception as e: printer.error(f'could not convert file: {e}') sys.exit(1) printer.info( f'Conversion successful! ' f'The converted file can be found at {new_config!r}.' ) sys.exit(0) # Now configure ReFrame according to the user configuration file try: try: printer.debug('Loading user configuration') site_config = config.load_config(options.config_file) except warnings.ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() # We ignore errors about unresolved sections or configuration # parameters here, because they might be defined at the individual # partition level and will be caught when we will instantiating # internally the system and partitions later on. 
site_config.select_subconfig(options.system, ignore_resolve_errors=True) for err in options.update_config(site_config): printer.warning(str(err)) # Update options from the selected execution mode if options.mode: mode_args = site_config.get(f'modes/@{options.mode}/options') # We lexically split the mode options, because otherwise spaces # will be treated as part of the option argument; see GH bug #1554 mode_args = list(itertools.chain.from_iterable(shlex.split(m) for m in mode_args)) # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(site_config) logging.configure_logging(site_config) except (OSError, errors.ConfigError) as e: printer.error(f'failed to load configuration: {e}') printer.error(logfiles_message()) sys.exit(1) printer.colorize = site_config.get('general/0/colorize') if not restrict_logging(): printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) try: printer.debug('Initializing runtime') runtime.init_runtime(site_config) except errors.ConfigError as e: printer.error(f'failed to initialize runtime: {e}') printer.error(logfiles_message()) sys.exit(1) if site_config.get('general/0/ignore_check_conflicts'): logging.getlogger().warning( ""the 'ignore_check_conflicts' option is deprecated "" ""and will be removed in the future"" ) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (errors.ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if (osext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") printer.error(logfiles_message()) sys.exit(1) # Show configuration after everything is set up if options.show_config: # Restore logging level printer.setLevel(logging.INFO) config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: # Create a unique value to differentiate between configuration # parameters with value `None` and invalid ones default = time.time() value = rt.get_option(config_param, default) if value == default: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) if options.detect_host_topology: from reframe.utility.cpuinfo import cpuinfo s_cpuinfo = cpuinfo() # Restore logging level printer.setLevel(logging.INFO) topofile = options.detect_host_topology if topofile == '-': printer.info(json.dumps(s_cpuinfo, indent=2)) else: try: with open(topofile, 'w') as fp: json.dump(s_cpuinfo, fp, indent=2) fp.write('\n') except OSError as e: getlogger().error( f'could not write topology file: {topofile!r}' ) sys.exit(1) sys.exit(0) autodetect.detect_topology() printer.debug(format_env(options.env_vars)) # Setup the check loader if options.restore_session is not None: # We need to load the failed checks only from a list of reports if options.restore_session: filenames = options.restore_session.split(',') else: filenames = [runreport.next_report_filename( osext.expandvars(site_config.get('general/0/report_file')), new=False )] report = 
runreport.load_report(*filenames) check_search_path = list(report.slice('filename', unique=True)) check_search_recursive = False # If `-c` or `-R` are passed explicitly outside the configuration # file, override the values set from the report file if site_config.is_sticky_option('general/check_search_path'): printer.warning( 'Ignoring check search path set in the report file: ' 'search path set explicitly in the command-line or ' 'the environment' ) check_search_path = site_config.get( 'general/0/check_search_path' ) if site_config.is_sticky_option('general/check_search_recursive'): printer.warning( 'Ignoring check search recursive option from the report file: ' 'option set explicitly in the command-line or the environment' ) check_search_recursive = site_config.get( 'general/0/check_search_recursive' ) else: check_search_recursive = site_config.get( 'general/0/check_search_recursive' ) check_search_path = site_config.get('general/0/check_search_path') # Collect any variables set from the command line external_vars = {} for expr in options.vars: try: lhs, rhs = expr.split('=', maxsplit=1) except ValueError: printer.warning( f'invalid test variable assignment: {expr!r}; skipping' ) else: external_vars[lhs] = rhs loader = RegressionCheckLoader(check_search_path, check_search_recursive, external_vars) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") session_info = { 'cmdline': ' '.join(sys.argv), 'config_file': rt.site_config.filename, 'data_version': runreport.DATA_VERSION, 'hostname': socket.getfqdn(), 'prefix_output': rt.output_prefix, 'prefix_stage': rt.stage_prefix, 'user': osext.osuser(), 'version': osext.reframe_version(), 'workdir': os.getcwd(), } # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', session_info['version']) print_infoline('command', repr(session_info['cmdline'])) print_infoline( f""launched by"", f""{session_info['user'] or ''}@{session_info['hostname']}"" ) print_infoline('working directory', repr(session_info['workdir'])) print_infoline('settings file', f""{session_info['config_file']!r}"") print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(session_info['prefix_stage'])) print_infoline('output directory', repr(session_info['prefix_output'])) printer.info('') try: # Locate and load checks checks_found = loader.load_all() printer.verbose(f'Loaded {len(checks_found)} test(s)') # Generate all possible test cases first; we will need them for # resolving dependencies after filtering testcases_all = generate_testcases(checks_found, options.skip_system_check, options.skip_prgenv_check) testcases = testcases_all printer.verbose(f'Generated {len(testcases)} test case(s)') # Filter test cases by name if options.exclude_names: for name in options.exclude_names: testcases = filter(filters.have_not_name(name), testcases) if options.names: testcases = filter( filters.have_name('|'.join(options.names)), testcases ) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by name: {len(testcases)} remaining' ) # Filter test cases by tags for tag in options.exclude_tags: testcases = filter(filters.have_not_tag(tag), testcases) for tag in options.tags: testcases = filter(filters.have_tag(tag), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by tags: {len(testcases)} remaining' ) # Filter test cases by maintainers for maint in options.maintainers: testcases = 
filter(filters.have_maintainer(maint), testcases) # Filter test cases further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: testcases = filter(filters.have_gpu_only(), testcases) elif options.cpu_only: testcases = filter(filters.have_cpu_only(), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by other attributes: ' f'{len(testcases)} remaining' ) # Filter in failed cases if options.failed: if options.restore_session is None: printer.error( ""the option '--failed' can only be used "" ""in combination with the '--restore-session' option"" ) sys.exit(1) def _case_failed(t): rec = report.case(*t) if not rec: return False return (rec['result'] == 'failure' or rec['result'] == 'aborted') testcases = list(filter(_case_failed, testcases)) printer.verbose( f'Filtering successful test case(s): ' f'{len(testcases)} remaining' ) # Prepare for running printer.debug('Building and validating the full test DAG') testgraph, skipped_cases = dependencies.build_deps(testcases_all) if skipped_cases: # Some cases were skipped, so adjust testcases testcases = list(set(testcases) - set(skipped_cases)) printer.verbose( f'Filtering test case(s) due to unresolved dependencies: ' f'{len(testcases)} remaining' ) dependencies.validate_deps(testgraph) printer.debug('Full test DAG:') printer.debug(dependencies.format_deps(testgraph)) restored_cases = [] if len(testcases) != len(testcases_all): testgraph = dependencies.prune_deps( testgraph, testcases, max_depth=1 if options.restore_session is not None else None ) printer.debug('Pruned test DAG') printer.debug(dependencies.format_deps(testgraph)) if options.restore_session is not None: testgraph, restored_cases = report.restore_dangling(testgraph) testcases = dependencies.toposort( testgraph, is_subgraph=options.restore_session is not None ) printer.verbose(f'Final number of test cases: {len(testcases)}') # Disable hooks for tc in testcases: for h in options.hooks: tc.check.disable_hook(h) # Act on checks if options.describe: # Restore logging level printer.setLevel(logging.INFO) describe_checks(testcases, printer) sys.exit(0) if options.list or options.list_detailed: concretized = (options.list == 'C' or options.list_detailed == 'C') detailed = options.list_detailed is not None list_checks(testcases, printer, detailed, concretized) sys.exit(0) if options.list_tags: list_tags(testcases, printer) sys.exit(0) if options.ci_generate: list_checks(testcases, printer) printer.info('[Generate CI]') with open(options.ci_generate, 'wt') as fp: ci.emit_pipeline(fp, testcases) printer.info( f' Gitlab pipeline generated successfully ' f'in {options.ci_generate!r}.\n' ) sys.exit(0) if not options.run: printer.error(""No action option specified. 
Available options:\n"" "" - `-l'/`-L' for listing\n"" "" - `-r' for running\n"" "" - `--list-tags' for listing unique test tags\n"" "" - `--ci-generate' for generating a CI pipeline\n"" f""Try `{argparser.prog} -h' for more options."") sys.exit(1) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(**m) # Load the environment for the current system try: printer.debug(f'Loading environment for current system') runtime.loadenv(rt.system.preload_environ) except errors.EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise def module_use(*paths): try: rt.modules_system.searchpath_add(*paths) except errors.EnvironError as e: printer.warning(f'could not add module paths correctly') printer.debug(str(e)) def module_unuse(*paths): try: rt.modules_system.searchpath_remove(*paths) except errors.EnvironError as e: printer.warning(f'could not remove module paths correctly') printer.debug(str(e)) printer.debug('(Un)using module paths from command line') module_paths = {} for d in options.module_paths: if d.startswith('-'): module_paths.setdefault('-', []) module_paths['-'].append(d[1:]) elif d.startswith('+'): module_paths.setdefault('+', []) module_paths['+'].append(d[1:]) else: module_paths.setdefault('x', []) module_paths['x'].append(d) for op, paths in module_paths.items(): if op == '+': module_use(*paths) elif op == '-': module_unuse(*paths) else: # First empty the current module path in a portable way searchpath = [p for p in rt.modules_system.searchpath if p] if searchpath: rt.modules_system.searchpath_remove(*searchpath) # Treat `A:B` syntax as well in this case paths = itertools.chain(*(p.split(':') for p in paths)) module_use(*paths) printer.debug('Loading user modules from command line') for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(**m, force=True) except errors.EnvironError as e: printer.warning( f'could not load module {m[""name""]!r} correctly; ' f'skipping...' 
) printer.debug(str(e)) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Run the tests # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise errors.ConfigError( errmsg.format(options.flex_alloc_nodes) ) except ValueError: sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes parsed_job_options = [] for opt in options.job_options: opt_split = opt.split('=', maxsplit=1) optstr = opt_split[0] valstr = opt_split[1] if len(opt_split) > 1 else '' if opt.startswith('-') or opt.startswith('#'): parsed_job_options.append(opt) elif len(optstr) == 1: parsed_job_options.append(f'-{optstr} {valstr}') else: parsed_job_options.append(f'--{optstr} {valstr}') exec_policy.sched_options = parsed_job_options try: max_retries = int(options.max_retries) except ValueError: raise errors.ConfigError( f'--max-retries is not a valid integer: {max_retries}' ) from None try: max_failures = int(options.maxfail) if max_failures < 0: raise errors.ConfigError( f'--maxfail should be a non-negative integer: ' f'{options.maxfail!r}' ) except ValueError: raise errors.ConfigError( f'--maxfail is not a valid integer: {options.maxfail!r}' ) from None runner = Runner(exec_policy, printer, max_retries, max_failures) try: time_start = time.time() session_info['time_start'] = time.strftime( '%FT%T%z', time.localtime(time_start), ) runner.runall(testcases, restored_cases) finally: time_end = time.time() session_info['time_end'] = time.strftime( '%FT%T%z', time.localtime(time_end) ) session_info['time_elapsed'] = time_end - time_start # Print a retry report if we did any retries if runner.stats.failed(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run success = True if runner.stats.failed(): success = False runner.stats.print_failure_report(printer) if options.failure_stats: runner.stats.print_failure_stats(printer) if options.performance_report: printer.info(runner.stats.performance_report()) # Generate the report for this session report_file = os.path.normpath( osext.expandvars(rt.get_option('general/0/report_file')) ) basedir = os.path.dirname(report_file) if basedir: os.makedirs(basedir, exist_ok=True) # Build final JSON report run_stats = runner.stats.json() session_info.update({ 'num_cases': run_stats[0]['num_cases'], 'num_failures': run_stats[-1]['num_failures'] }) json_report = { 'session_info': session_info, 'runs': run_stats, 'restored_cases': [] } if options.restore_session is not None: for c in restored_cases: json_report['restored_cases'].append(report.case(*c)) report_file = runreport.next_report_filename(report_file) try: with open(report_file, 'w') as fp: jsonext.dump(json_report, fp, indent=2) fp.write('\n') 
printer.info(f'Run report saved in {report_file!r}') except OSError as e: printer.warning( f'failed to generate report in {report_file!r}: {e}' ) # Generate the junit xml report for this session junit_report_file = rt.get_option('general/0/report_junit') if junit_report_file: # Expand variables in filename junit_report_file = osext.expandvars(junit_report_file) junit_xml = runreport.junit_xml_report(json_report) try: with open(junit_report_file, 'w') as fp: runreport.junit_dump(junit_xml, fp) except OSError as e: printer.warning( f'failed to generate report in {junit_report_file!r}: ' f'{e}' ) if not success: sys.exit(1) sys.exit(0) except (Exception, KeyboardInterrupt, errors.ReframeFatalError): exc_info = sys.exc_info() tb = ''.join(traceback.format_exception(*exc_info)) printer.error(f'run session stopped: {errors.what(*exc_info)}') if errors.is_exit_request(*exc_info): # Print stack traces for exit requests only when TOO verbose printer.debug2(tb) elif errors.is_severe(*exc_info): printer.error(tb) else: printer.verbose(tb) sys.exit(1) finally: try: log_files = logging.log_files() if site_config.get('general/0/save_log_files'): log_files = logging.save_log_files(rt.output_prefix) except OSError as e: printer.error(f'could not save log file: {e}') sys.exit(1) finally: if not restrict_logging(): printer.info(logfiles_message()) ","def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--dont-restage', action='store_false', dest='clean_stagedir', help='Reuse the test stage directory', envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '--report-file', action='store', metavar='FILE', help=""Store JSON run report in FILE"", envvar='RFM_REPORT_FILE', configvar='general/report_file' ) output_options.add_argument( '--report-junit', action='store', metavar='FILE', help=""Store a JUnit report in FILE"", envvar='RFM_REPORT_JUNIT', configvar='general/report_junit' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', 
configvar='systems/stagedir' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='%FT%T', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help=('Skip checks with conflicting names ' '(this option is deprecated and has no effect)'), envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) # Select options select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) select_options.add_argument( '--failed', action='store_true', help=""Select failed test cases (only when '--restore-session' is used)"" ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--maintainer', action='append', dest='maintainers', default=[], metavar='PATTERN', help='Select checks with at least one maintainer matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) # FIXME: The following is the only selection option that has an associated # (undocumented) configuration variable. This is to support pruning of the # partition environments as the runtime is created, similarly to how the # system partitions are treated. 
Currently, this facilitates the # implementation of fixtures, but we should reconsider it: see discussion # in https://github.com/eth-cscs/reframe/issues/2245 select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', configvar='general/valid_env_names', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '-T', '--exclude-tag', action='append', dest='exclude_tags', metavar='PATTERN', default=[], help='Exclude checks whose tag matches PATTERN' ) select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) # Action options action_options.add_argument( '--ci-generate', action='store', metavar='FILE', help=('Generate into FILE a Gitlab CI pipeline ' 'for the selected tests and exit'), ) action_options.add_argument( '--describe', action='store_true', help='Give full details on the selected tests' ) action_options.add_argument( '-L', '--list-detailed', nargs='?', const='T', choices=['C', 'T'], help=('List the selected tests (T) or the concretized test cases (C) ' 'providing more details') ) action_options.add_argument( '-l', '--list', nargs='?', const='T', choices=['C', 'T'], help='List the selected tests (T) or the concretized test cases (C)' ) action_options.add_argument( '--list-tags', action='store_true', help='List the unique tags found in the selected tests and exit' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) # Run options run_options.add_argument( '--disable-hook', action='append', metavar='NAME', dest='hooks', default=[], help='Disable a pipeline hook for this run' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' 
) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '-J', '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) run_options.add_argument( '--maxfail', metavar='NUM', action='store', default=sys.maxsize, help='Exit after first NUM failures' ) run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--restore-session', action='store', nargs='?', const='', metavar='REPORT', help='Restore a testing session from REPORT file' ) run_options.add_argument( '-S', '--setvar', action='append', metavar='[TEST.]VAR=VAL', dest='vars', default=[], help=('Set test variable VAR to VAL in all tests ' 'or optionally in TEST only') ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) # Environment options env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '--module-path', action='append', metavar='PATH', dest='module_paths', default=[], help='(Un)use module path PATH before running any regression check', ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--detect-host-topology', action='store', nargs='?', const='-', help='Detect the local host topology and exit' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--nocolor', 
action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--upgrade-config-file', action='store', metavar='OLD[:NEW]', help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax' ) misc_options.add_argument( '-V', '--version', action='version', version=osext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) misc_options.add_argument( '-q', '--quiet', action='count', default=0, help='Decrease verbosity level of output', ) # Options not associated with command-line arguments argparser.add_argument( dest='git_timeout', envvar='RFM_GIT_TIMEOUT', configvar='general/git_timeout', help=('Timeout in seconds when checking if the url is a ' 'valid repository.') ) argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_ADDRESS', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) argparser.add_argument( dest='httpjson_url', envvar='RFM_HTTPJSON_URL', configvar='logging/handlers_perflog/httpjson_url', help='URL of HTTP server accepting JSON logs' ) argparser.add_argument( dest='ignore_reqnodenotavail', envvar='RFM_IGNORE_REQNODENOTAVAIL', configvar='schedulers/ignore_reqnodenotavail', action='store_true', help='Graylog server address' ) argparser.add_argument( dest='compact_test_names', envvar='RFM_COMPACT_TEST_NAMES', configvar='general/compact_test_names', action='store_true', help='Use a compact test naming scheme' ) argparser.add_argument( dest='dump_pipeline_progress', envvar='RFM_DUMP_PIPELINE_PROGRESS', configvar='general/dump_pipeline_progress', action='store_true', help='Dump progress information for the async execution' ) argparser.add_argument( dest='pipeline_timeout', envvar='RFM_PIPELINE_TIMEOUT', configvar='general/pipeline_timeout', action='store', help='Timeout for advancing the pipeline' ) argparser.add_argument( dest='remote_detect', envvar='RFM_REMOTE_DETECT', configvar='general/remote_detect', action='store_true', help='Detect remote system topology' ) argparser.add_argument( dest='remote_workdir', envvar='RFM_REMOTE_WORKDIR', configvar='general/remote_workdir', action='store', help='Working directory for launching ReFrame remotely' ) argparser.add_argument( dest='resolve_module_conflicts', envvar='RFM_RESOLVE_MODULE_CONFLICTS', configvar='general/resolve_module_conflicts', action='store_true', help='Resolve module conflicts automatically' ) argparser.add_argument( dest='syslog_address', envvar='RFM_SYSLOG_ADDRESS', configvar='logging/handlers_perflog/syslog_address', help='Syslog server address' ) argparser.add_argument( dest='trap_job_errors', envvar='RFM_TRAP_JOB_ERRORS', configvar='general/trap_job_errors', action='store_true', help='Trap job errors in job scripts and fail tests automatically' ) argparser.add_argument( dest='use_login_shell', envvar='RFM_USE_LOGIN_SHELL', configvar='general/use_login_shell', action='store_true', help='Use a login shell for job scripts' ) def restrict_logging(): '''Restrict 
logging to errors only. This is done when specific options are passed, which generate JSON output and we don't want to pollute the output with other logging output. :returns: :obj:`True` if the logging was restricted, :obj:`False` otherwise. ''' if (options.show_config or options.detect_host_topology or options.describe): logging.getlogger().setLevel(logging.ERROR) return True else: return False # Parse command line options = argparser.parse_args() if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') if not restrict_logging(): printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' 'please use RFM_GRAYLOG_ADDRESS instead' ) os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER') if options.upgrade_config_file is not None: old_config, *new_config = options.upgrade_config_file.split( ':', maxsplit=1 ) new_config = new_config[0] if new_config else None try: new_config = config.convert_old_config(old_config, new_config) except Exception as e: printer.error(f'could not convert file: {e}') sys.exit(1) printer.info( f'Conversion successful! ' f'The converted file can be found at {new_config!r}.' ) sys.exit(0) # Now configure ReFrame according to the user configuration file try: try: printer.debug('Loading user configuration') site_config = config.load_config(options.config_file) except warnings.ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() # We ignore errors about unresolved sections or configuration # parameters here, because they might be defined at the individual # partition level and will be caught when we will instantiating # internally the system and partitions later on. 
site_config.select_subconfig(options.system, ignore_resolve_errors=True) for err in options.update_config(site_config): printer.warning(str(err)) # Update options from the selected execution mode if options.mode: mode_args = site_config.get(f'modes/@{options.mode}/options') # We lexically split the mode options, because otherwise spaces # will be treated as part of the option argument; see GH bug #1554 mode_args = list(itertools.chain.from_iterable(shlex.split(m) for m in mode_args)) # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(site_config) logging.configure_logging(site_config) except (OSError, errors.ConfigError) as e: printer.error(f'failed to load configuration: {e}') printer.error(logfiles_message()) sys.exit(1) printer.colorize = site_config.get('general/0/colorize') if not restrict_logging(): printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) try: printer.debug('Initializing runtime') runtime.init_runtime(site_config) except errors.ConfigError as e: printer.error(f'failed to initialize runtime: {e}') printer.error(logfiles_message()) sys.exit(1) if site_config.get('general/0/ignore_check_conflicts'): logging.getlogger().warning( ""the 'ignore_check_conflicts' option is deprecated "" ""and will be removed in the future"" ) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (errors.ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if (osext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") printer.error(logfiles_message()) sys.exit(1) # Show configuration after everything is set up if options.show_config: # Restore logging level printer.setLevel(logging.INFO) config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: # Create a unique value to differentiate between configuration # parameters with value `None` and invalid ones default = {'token'} value = rt.get_option(config_param, default) if value is default: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) if options.detect_host_topology: from reframe.utility.cpuinfo import cpuinfo s_cpuinfo = cpuinfo() # Restore logging level printer.setLevel(logging.INFO) topofile = options.detect_host_topology if topofile == '-': printer.info(json.dumps(s_cpuinfo, indent=2)) else: try: with open(topofile, 'w') as fp: json.dump(s_cpuinfo, fp, indent=2) fp.write('\n') except OSError as e: getlogger().error( f'could not write topology file: {topofile!r}' ) sys.exit(1) sys.exit(0) autodetect.detect_topology() printer.debug(format_env(options.env_vars)) # Setup the check loader if options.restore_session is not None: # We need to load the failed checks only from a list of reports if options.restore_session: filenames = options.restore_session.split(',') else: filenames = [runreport.next_report_filename( osext.expandvars(site_config.get('general/0/report_file')), new=False )] report = 
runreport.load_report(*filenames) check_search_path = list(report.slice('filename', unique=True)) check_search_recursive = False # If `-c` or `-R` are passed explicitly outside the configuration # file, override the values set from the report file if site_config.is_sticky_option('general/check_search_path'): printer.warning( 'Ignoring check search path set in the report file: ' 'search path set explicitly in the command-line or ' 'the environment' ) check_search_path = site_config.get( 'general/0/check_search_path' ) if site_config.is_sticky_option('general/check_search_recursive'): printer.warning( 'Ignoring check search recursive option from the report file: ' 'option set explicitly in the command-line or the environment' ) check_search_recursive = site_config.get( 'general/0/check_search_recursive' ) else: check_search_recursive = site_config.get( 'general/0/check_search_recursive' ) check_search_path = site_config.get('general/0/check_search_path') # Collect any variables set from the command line external_vars = {} for expr in options.vars: try: lhs, rhs = expr.split('=', maxsplit=1) except ValueError: printer.warning( f'invalid test variable assignment: {expr!r}; skipping' ) else: external_vars[lhs] = rhs loader = RegressionCheckLoader(check_search_path, check_search_recursive, external_vars) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") session_info = { 'cmdline': ' '.join(sys.argv), 'config_file': rt.site_config.filename, 'data_version': runreport.DATA_VERSION, 'hostname': socket.getfqdn(), 'prefix_output': rt.output_prefix, 'prefix_stage': rt.stage_prefix, 'user': osext.osuser(), 'version': osext.reframe_version(), 'workdir': os.getcwd(), } # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', session_info['version']) print_infoline('command', repr(session_info['cmdline'])) print_infoline( f""launched by"", f""{session_info['user'] or ''}@{session_info['hostname']}"" ) print_infoline('working directory', repr(session_info['workdir'])) print_infoline('settings file', f""{session_info['config_file']!r}"") print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(session_info['prefix_stage'])) print_infoline('output directory', repr(session_info['prefix_output'])) printer.info('') try: # Locate and load checks checks_found = loader.load_all() printer.verbose(f'Loaded {len(checks_found)} test(s)') # Generate all possible test cases first; we will need them for # resolving dependencies after filtering testcases_all = generate_testcases(checks_found, options.skip_system_check, options.skip_prgenv_check) testcases = testcases_all printer.verbose(f'Generated {len(testcases)} test case(s)') # Filter test cases by name if options.exclude_names: for name in options.exclude_names: testcases = filter(filters.have_not_name(name), testcases) if options.names: testcases = filter( filters.have_name('|'.join(options.names)), testcases ) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by name: {len(testcases)} remaining' ) # Filter test cases by tags for tag in options.exclude_tags: testcases = filter(filters.have_not_tag(tag), testcases) for tag in options.tags: testcases = filter(filters.have_tag(tag), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by tags: {len(testcases)} remaining' ) # Filter test cases by maintainers for maint in options.maintainers: testcases = 
filter(filters.have_maintainer(maint), testcases) # Filter test cases further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: testcases = filter(filters.have_gpu_only(), testcases) elif options.cpu_only: testcases = filter(filters.have_cpu_only(), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by other attributes: ' f'{len(testcases)} remaining' ) # Filter in failed cases if options.failed: if options.restore_session is None: printer.error( ""the option '--failed' can only be used "" ""in combination with the '--restore-session' option"" ) sys.exit(1) def _case_failed(t): rec = report.case(*t) if not rec: return False return (rec['result'] == 'failure' or rec['result'] == 'aborted') testcases = list(filter(_case_failed, testcases)) printer.verbose( f'Filtering successful test case(s): ' f'{len(testcases)} remaining' ) # Prepare for running printer.debug('Building and validating the full test DAG') testgraph, skipped_cases = dependencies.build_deps(testcases_all) if skipped_cases: # Some cases were skipped, so adjust testcases testcases = list(set(testcases) - set(skipped_cases)) printer.verbose( f'Filtering test case(s) due to unresolved dependencies: ' f'{len(testcases)} remaining' ) dependencies.validate_deps(testgraph) printer.debug('Full test DAG:') printer.debug(dependencies.format_deps(testgraph)) restored_cases = [] if len(testcases) != len(testcases_all): testgraph = dependencies.prune_deps( testgraph, testcases, max_depth=1 if options.restore_session is not None else None ) printer.debug('Pruned test DAG') printer.debug(dependencies.format_deps(testgraph)) if options.restore_session is not None: testgraph, restored_cases = report.restore_dangling(testgraph) testcases = dependencies.toposort( testgraph, is_subgraph=options.restore_session is not None ) printer.verbose(f'Final number of test cases: {len(testcases)}') # Disable hooks for tc in testcases: for h in options.hooks: tc.check.disable_hook(h) # Act on checks if options.describe: # Restore logging level printer.setLevel(logging.INFO) describe_checks(testcases, printer) sys.exit(0) if options.list or options.list_detailed: concretized = (options.list == 'C' or options.list_detailed == 'C') detailed = options.list_detailed is not None list_checks(testcases, printer, detailed, concretized) sys.exit(0) if options.list_tags: list_tags(testcases, printer) sys.exit(0) if options.ci_generate: list_checks(testcases, printer) printer.info('[Generate CI]') with open(options.ci_generate, 'wt') as fp: ci.emit_pipeline(fp, testcases) printer.info( f' Gitlab pipeline generated successfully ' f'in {options.ci_generate!r}.\n' ) sys.exit(0) if not options.run: printer.error(""No action option specified. 
Available options:\n"" "" - `-l'/`-L' for listing\n"" "" - `-r' for running\n"" "" - `--list-tags' for listing unique test tags\n"" "" - `--ci-generate' for generating a CI pipeline\n"" f""Try `{argparser.prog} -h' for more options."") sys.exit(1) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(**m) # Load the environment for the current system try: printer.debug(f'Loading environment for current system') runtime.loadenv(rt.system.preload_environ) except errors.EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise def module_use(*paths): try: rt.modules_system.searchpath_add(*paths) except errors.EnvironError as e: printer.warning(f'could not add module paths correctly') printer.debug(str(e)) def module_unuse(*paths): try: rt.modules_system.searchpath_remove(*paths) except errors.EnvironError as e: printer.warning(f'could not remove module paths correctly') printer.debug(str(e)) printer.debug('(Un)using module paths from command line') module_paths = {} for d in options.module_paths: if d.startswith('-'): module_paths.setdefault('-', []) module_paths['-'].append(d[1:]) elif d.startswith('+'): module_paths.setdefault('+', []) module_paths['+'].append(d[1:]) else: module_paths.setdefault('x', []) module_paths['x'].append(d) for op, paths in module_paths.items(): if op == '+': module_use(*paths) elif op == '-': module_unuse(*paths) else: # First empty the current module path in a portable way searchpath = [p for p in rt.modules_system.searchpath if p] if searchpath: rt.modules_system.searchpath_remove(*searchpath) # Treat `A:B` syntax as well in this case paths = itertools.chain(*(p.split(':') for p in paths)) module_use(*paths) printer.debug('Loading user modules from command line') for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(**m, force=True) except errors.EnvironError as e: printer.warning( f'could not load module {m[""name""]!r} correctly; ' f'skipping...' 
) printer.debug(str(e)) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Run the tests # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise errors.ConfigError( errmsg.format(options.flex_alloc_nodes) ) except ValueError: sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes parsed_job_options = [] for opt in options.job_options: opt_split = opt.split('=', maxsplit=1) optstr = opt_split[0] valstr = opt_split[1] if len(opt_split) > 1 else '' if opt.startswith('-') or opt.startswith('#'): parsed_job_options.append(opt) elif len(optstr) == 1: parsed_job_options.append(f'-{optstr} {valstr}') else: parsed_job_options.append(f'--{optstr} {valstr}') exec_policy.sched_options = parsed_job_options try: max_retries = int(options.max_retries) except ValueError: raise errors.ConfigError( f'--max-retries is not a valid integer: {max_retries}' ) from None try: max_failures = int(options.maxfail) if max_failures < 0: raise errors.ConfigError( f'--maxfail should be a non-negative integer: ' f'{options.maxfail!r}' ) except ValueError: raise errors.ConfigError( f'--maxfail is not a valid integer: {options.maxfail!r}' ) from None runner = Runner(exec_policy, printer, max_retries, max_failures) try: time_start = time.time() session_info['time_start'] = time.strftime( '%FT%T%z', time.localtime(time_start), ) runner.runall(testcases, restored_cases) finally: time_end = time.time() session_info['time_end'] = time.strftime( '%FT%T%z', time.localtime(time_end) ) session_info['time_elapsed'] = time_end - time_start # Print a retry report if we did any retries if runner.stats.failed(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run success = True if runner.stats.failed(): success = False runner.stats.print_failure_report(printer) if options.failure_stats: runner.stats.print_failure_stats(printer) if options.performance_report: printer.info(runner.stats.performance_report()) # Generate the report for this session report_file = os.path.normpath( osext.expandvars(rt.get_option('general/0/report_file')) ) basedir = os.path.dirname(report_file) if basedir: os.makedirs(basedir, exist_ok=True) # Build final JSON report run_stats = runner.stats.json() session_info.update({ 'num_cases': run_stats[0]['num_cases'], 'num_failures': run_stats[-1]['num_failures'] }) json_report = { 'session_info': session_info, 'runs': run_stats, 'restored_cases': [] } if options.restore_session is not None: for c in restored_cases: json_report['restored_cases'].append(report.case(*c)) report_file = runreport.next_report_filename(report_file) try: with open(report_file, 'w') as fp: jsonext.dump(json_report, fp, indent=2) fp.write('\n') 
printer.info(f'Run report saved in {report_file!r}') except OSError as e: printer.warning( f'failed to generate report in {report_file!r}: {e}' ) # Generate the junit xml report for this session junit_report_file = rt.get_option('general/0/report_junit') if junit_report_file: # Expand variables in filename junit_report_file = osext.expandvars(junit_report_file) junit_xml = runreport.junit_xml_report(json_report) try: with open(junit_report_file, 'w') as fp: runreport.junit_dump(junit_xml, fp) except OSError as e: printer.warning( f'failed to generate report in {junit_report_file!r}: ' f'{e}' ) if not success: sys.exit(1) sys.exit(0) except (Exception, KeyboardInterrupt, errors.ReframeFatalError): exc_info = sys.exc_info() tb = ''.join(traceback.format_exception(*exc_info)) printer.error(f'run session stopped: {errors.what(*exc_info)}') if errors.is_exit_request(*exc_info): # Print stack traces for exit requests only when TOO verbose printer.debug2(tb) elif errors.is_severe(*exc_info): printer.error(tb) else: printer.verbose(tb) sys.exit(1) finally: try: log_files = logging.log_files() if site_config.get('general/0/save_log_files'): log_files = logging.save_log_files(rt.output_prefix) except OSError as e: printer.error(f'could not save log file: {e}') sys.exit(1) finally: if not restrict_logging(): printer.info(logfiles_message()) " 34450,"def crf_decode_backward( inputs: TensorLike, scores: TensorLike, state: TensorLike ) -> Tuple[tf.Tensor, tf.Tensor]: """"""Computes backward decoding in a linear-chain CRF. Args: inputs: A [batch_size, num_tags] matrix of backpointer of next step (in time order). scores: A [batch_size, num_tags] matrix of scores of next step (in time order). state: A [batch_size, 1] matrix of tag index of next step. Returns: new_tags: A [batch_size, num_tags] tensor containing the new tag indices. new_scores: A [batch_size, num_tags] tensor containing the new score values. """""" inputs = tf.transpose(inputs, [1, 0, 2]) scores = tf.transpose(scores, [1, 0, 2]) def _scan_fn(state, inputs): state = tf.cast(tf.squeeze(state, axis=[1]), dtype=tf.int32) idxs = tf.stack([tf.range(tf.shape(inputs)[0]), state], axis=1) new_tags = tf.expand_dims(tf.gather_nd(inputs, idxs), axis=-1) return new_tags output_tags = tf.scan(_scan_fn, inputs, state) state = tf.cast(state, dtype=tf.float32) output_scores = tf.scan(_scan_fn, scores, state) return tf.transpose(output_tags, [1, 0, 2]), tf.transpose(output_scores, [1, 0, 2]) ","def crf_decode_backward( backpointers: TensorLike, scores: TensorLike, state: TensorLike ) -> Tuple[tf.Tensor, tf.Tensor]: """"""Computes backward decoding in a linear-chain CRF. Args: inputs: A [batch_size, num_tags] matrix of backpointer of next step (in time order). scores: A [batch_size, num_tags] matrix of scores of next step (in time order). state: A [batch_size, 1] matrix of tag index of next step. Returns: new_tags: A [batch_size, num_tags] tensor containing the new tag indices. new_scores: A [batch_size, num_tags] tensor containing the new score values. 
"""""" inputs = tf.transpose(inputs, [1, 0, 2]) scores = tf.transpose(scores, [1, 0, 2]) def _scan_fn(state, inputs): state = tf.cast(tf.squeeze(state, axis=[1]), dtype=tf.int32) idxs = tf.stack([tf.range(tf.shape(inputs)[0]), state], axis=1) new_tags = tf.expand_dims(tf.gather_nd(inputs, idxs), axis=-1) return new_tags output_tags = tf.scan(_scan_fn, inputs, state) state = tf.cast(state, dtype=tf.float32) output_scores = tf.scan(_scan_fn, scores, state) return tf.transpose(output_tags, [1, 0, 2]), tf.transpose(output_scores, [1, 0, 2]) " 41497,"def upperlimit(data, model, scan, return_results=False): ''' Calculate an upper limit interval (0,poi_up) for a single Parameter of Interest (POI) using a fixed scan through POI-space. Args: data (tensor): the observed data model (pyhf.Model): the statistical model scan (Iterable): iterable of poi values return_results (bool): whether to return the per-point results Returns: observed limit (tensor) expected limit (tensor) scan results (tuple (tensor, tensor)) ''' tb, _ = get_backend() results = [hypotest(mu, data, model, return_expected_set=True) for mu in scan] obs = tb.astensor([[r[0][0]] for r in results]) exp = tb.astensor([[r[1][i][0] for i in range(5)] for r in results]) resarary = tb.concatenate([obs, exp], axis=1).T limits = [_interp(0.05, resarary[i][::-1], scan[::-1]) for i in range(6)] if return_results: return limits[0], limits[1:], (scan, results) return limits[0], limits[1:] ","def upperlimit(data, model, scan, alpha=0.05, return_results=False): ''' Calculate an upper limit interval (0,poi_up) for a single Parameter of Interest (POI) using a fixed scan through POI-space. Args: data (tensor): the observed data model (pyhf.Model): the statistical model scan (Iterable): iterable of poi values return_results (bool): whether to return the per-point results Returns: observed limit (tensor) expected limit (tensor) scan results (tuple (tensor, tensor)) ''' tb, _ = get_backend() results = [hypotest(mu, data, model, return_expected_set=True) for mu in scan] obs = tb.astensor([[r[0][0]] for r in results]) exp = tb.astensor([[r[1][i][0] for i in range(5)] for r in results]) resarary = tb.concatenate([obs, exp], axis=1).T limits = [_interp(0.05, resarary[i][::-1], scan[::-1]) for i in range(6)] if return_results: return limits[0], limits[1:], (scan, results) return limits[0], limits[1:] " 44043,"def generate_taus(nullspace, num_qubits): """"""Generate generators tau from the nullspace Args: nullspace (list): kernel of the binary matrix corresponding to the Hamiltonian. num_qubits (int): number of wires required to define the Hamiltonian. Returns: generators (list): list of generators of symmetries, taus, for the Hamiltonian. """""" generators = [] for null_vector in nullspace: tau = qml.Identity(0) for idx, op in enumerate( zip(null_vector[:num_qubits], null_vector[num_qubits:]) ): x, z = op if x == 0 and z == 0: tau @= qml.Identity(idx) elif x == 1 and z == 0: tau @= qml.PauliX(idx) elif x == 1 and z == 1: tau @= qml.PauliY(idx) else: tau @= qml.PauliZ(idx) ham = qml.Hamiltonian([1.0], [tau], simplify=True) generators.append(ham) return generators ","def generate_taus(nullspace, num_qubits): """"""Compute the tau generators from the nullspace of the binary matrix form of a Hamiltonian Args: nullspace (list): kernel of the binary matrix corresponding to the Hamiltonian. num_qubits (int): number of wires required to define the Hamiltonian. Returns: generators (list): list of generators of symmetries, taus, for the Hamiltonian. 
"""""" generators = [] for null_vector in nullspace: tau = qml.Identity(0) for idx, op in enumerate( zip(null_vector[:num_qubits], null_vector[num_qubits:]) ): x, z = op if x == 0 and z == 0: tau @= qml.Identity(idx) elif x == 1 and z == 0: tau @= qml.PauliX(idx) elif x == 1 and z == 1: tau @= qml.PauliY(idx) else: tau @= qml.PauliZ(idx) ham = qml.Hamiltonian([1.0], [tau], simplify=True) generators.append(ham) return generators " 4554,"def _short_repr(niimg_rep, shorten=True, truncate=20): """"""Gives a shorten version on niimg representation """""" if not shorten: return niimg_rep if len(niimg_rep) > truncate: # Shorten the repr to have a useful error message return niimg_rep[:(truncate-2)] + '...' return niimg_rep ","def _short_repr(niimg_rep, shorten=True, truncate=20): """"""Gives a shorten version on niimg representation """""" if not shorten: return niimg_rep if len(niimg_rep) > truncate: # Shorten the repr to have a useful error message return niimg_rep[: (truncate - 2)] + '...' return niimg_rep " 31556,"def create_ticket_context(data: dict, additional_fields: list = None) -> Any: """"""Create ticket context. Args: data: ticket data. additional_fields: additional fields to extract from the ticket Returns: ticket context. """""" context = { 'ID': data.get('sys_id'), 'Summary': data.get('short_description'), 'Number': data.get('number'), 'CreatedOn': data.get('sys_created_on'), 'Active': data.get('active'), 'AdditionalComments': data.get('comments'), 'CloseCode': data.get('close_code'), 'OpenedAt': data.get('opened_at') } if additional_fields: for additional_field in additional_fields: if additional_field in data.keys() and camelize_string(additional_field) not in context.keys(): context[additional_field] = data.get(additional_field) # These fields refer to records in the database, the value is their system ID. closed_by = data.get('closed_by') if closed_by and type(closed_by) == dict: context['ResolvedBy'] = closed_by.get('value', '') opened_by = data.get('opened_by') if opened_by and type(opened_by) == dict: context['OpenedBy'] = opened_by.get('value', '') context['Creator'] = opened_by.get('value', '') assigned_to = data.get('assigned_to') if assigned_to and type(assigned_to) == dict: context['Assignee'] = assigned_to.get('value', '') # Try to map fields priority = data.get('priority') if priority: context['Priority'] = TICKET_PRIORITY.get(priority, priority) state = data.get('state') if state: context['State'] = state return createContext(context, removeNull=True) ","def create_ticket_context(data: dict, additional_fields: list = None) -> Any: """"""Create ticket context. Args: data: ticket data. additional_fields: additional fields to extract from the ticket Returns: ticket context. """""" context = { 'ID': data.get('sys_id'), 'Summary': data.get('short_description'), 'Number': data.get('number'), 'CreatedOn': data.get('sys_created_on'), 'Active': data.get('active'), 'AdditionalComments': data.get('comments'), 'CloseCode': data.get('close_code'), 'OpenedAt': data.get('opened_at') } if additional_fields: for additional_field in additional_fields: if additional_field in data.keys() and camelize_string(additional_field) not in context.keys(): context[additional_field] = data.get(additional_field) # These fields refer to records in the database, the value is their system ID. 
closed_by = data.get('closed_by') if closed_by and isinstance(closed_by, dict): context['ResolvedBy'] = closed_by.get('value', '') opened_by = data.get('opened_by') if opened_by and type(opened_by) == dict: context['OpenedBy'] = opened_by.get('value', '') context['Creator'] = opened_by.get('value', '') assigned_to = data.get('assigned_to') if assigned_to and type(assigned_to) == dict: context['Assignee'] = assigned_to.get('value', '') # Try to map fields priority = data.get('priority') if priority: context['Priority'] = TICKET_PRIORITY.get(priority, priority) state = data.get('state') if state: context['State'] = state return createContext(context, removeNull=True) " 36510,"def _find_rteq(a, l, x): """"""Locate the rightmost value exactly equal to x"""""" i = bisect_right(a, x, lo=l) if i != (len(a)+1) and a[i-1] == x: return i-1 raise ValueError ","def _find_rteq(a, l, x): 'Locate the rightmost value exactly equal to x' i = bisect_right(a, x, lo=l) if i != (len(a)+1) and a[i-1] == x: return i-1 raise ValueError " 22829,"def set_correct_day_from_settings(date_obj, settings, current_day=None): """""" Set correct day according to `PREFER_DAY_OF_MONTH` setting."""""" options = { 'first': 1, 'last': get_last_day_of_month(date_obj.year, date_obj.month), 'current': current_day or datetime.now().day } try: return date_obj.replace(day=options[settings.PREFER_DAY_OF_MONTH]) except ValueError: return date_obj.replace(day=options['last']) ","def set_correct_day_from_settings(date_obj, settings, current_day=None): """""" Set the correct day according to the `PREFER_DAY_OF_MONTH` setting."""""" options = { 'first': 1, 'last': get_last_day_of_month(date_obj.year, date_obj.month), 'current': current_day or datetime.now().day } try: return date_obj.replace(day=options[settings.PREFER_DAY_OF_MONTH]) except ValueError: return date_obj.replace(day=options['last']) " 27266,"def test_date_time_literals(): ibis.date(2022, 2, 4) ibis.time(16, 20, 00) ibis.timestamp(2022, 2, 4, 16, 20, 00) ","def test_date_time_literals(): ibis.date(2022, 2, 4) assert ibis.time(16, 20, 00).type() == dt.time ibis.timestamp(2022, 2, 4, 16, 20, 00) " 39937,"def collect_external_ip_address(emitter: StdoutEmitter, network: str, force: bool = False) -> str: # From environment variable # TODO: remove this environment variable? ip = os.environ.get(NUCYPHER_ENVVAR_WORKER_IP_ADDRESS) if ip: message = f'Using IP address from {NUCYPHER_ENVVAR_WORKER_IP_ADDRESS} environment variable' emitter.message(message, verbosity=2) return ip # From node swarm try: message = f'Detecting external IP address automatically' emitter.message(message, verbosity=2) ip = determine_external_ip_address(network=network) except UnknownIPAddress: if force: raise emitter.message('Cannot automatically determine external IP address - input required') # Confirmation if not force: if not click.confirm(CONFIRM_URSULA_IPV4_ADDRESS.format(rest_host=ip)): ip = click.prompt(COLLECT_URSULA_IPV4_ADDRESS, type=IPV4_ADDRESS) return ip ","def collect_external_ip_address(emitter: StdoutEmitter, network: str, force: bool = False) -> str: # From environment variable # TODO: remove this environment variable? 
ip = os.environ.get(NUCYPHER_ENVVAR_WORKER_IP_ADDRESS) if ip: message = f'Using IP address, {ip}, from {NUCYPHER_ENVVAR_WORKER_IP_ADDRESS} environment variable' emitter.message(message, verbosity=2) return ip # From node swarm try: message = f'Detecting external IP address automatically' emitter.message(message, verbosity=2) ip = determine_external_ip_address(network=network) except UnknownIPAddress: if force: raise emitter.message('Cannot automatically determine external IP address - input required') # Confirmation if not force: if not click.confirm(CONFIRM_URSULA_IPV4_ADDRESS.format(rest_host=ip)): ip = click.prompt(COLLECT_URSULA_IPV4_ADDRESS, type=IPV4_ADDRESS) return ip " 39138,"def skipIfNoRNNT(test_item): try: import torch.ops.torchaudio.rnnt_loss as _ return test_item except ImportError: return unittest.skip(""torchaudio C++ extension is not compiled with RNNT"")(test_item) ","def skipIfNoRNNT(test_item): try: _ = torch.ops.torchaudio.rnn_loss return test_item except ImportError: return unittest.skip(""torchaudio C++ extension is not compiled with RNNT"")(test_item) " 4659,"def _create_plot_component(obj): # Setup the spectrum plot frequencies = linspace(0.0, float(SAMPLING_RATE) / 2, num=NUM_SAMPLES / 2) obj.spectrum_data = ArrayPlotData(frequency=frequencies) empty_amplitude = zeros(NUM_SAMPLES / 2) obj.spectrum_data.set_data(""amplitude"", empty_amplitude) obj.spectrum_plot = Plot(obj.spectrum_data) spec_renderer = obj.spectrum_plot.plot( (""frequency"", ""amplitude""), name=""Spectrum"", color=""red"" )[0] obj.spectrum_plot.padding = 50 obj.spectrum_plot.title = ""Spectrum"" spec_range = list(obj.spectrum_plot.plots.values())[0][ 0 ].value_mapper.range spec_range.low = 0.0 spec_range.high = 5.0 obj.spectrum_plot.index_axis.title = ""Frequency (hz)"" obj.spectrum_plot.value_axis.title = ""Amplitude"" # Time Series plot times = linspace(0.0, float(NUM_SAMPLES) / SAMPLING_RATE, num=NUM_SAMPLES) obj.time_data = ArrayPlotData(time=times) empty_amplitude = zeros(NUM_SAMPLES) obj.time_data.set_data(""amplitude"", empty_amplitude) obj.time_plot = Plot(obj.time_data) obj.time_plot.plot((""time"", ""amplitude""), name=""Time"", color=""blue"") obj.time_plot.padding = 50 obj.time_plot.title = ""Time"" obj.time_plot.index_axis.title = ""Time (seconds)"" obj.time_plot.value_axis.title = ""Amplitude"" time_range = list(obj.time_plot.plots.values())[0][0].value_mapper.range time_range.low = -0.2 time_range.high = 0.2 # Spectrogram plot values = [zeros(NUM_SAMPLES / 2) for i in range(SPECTROGRAM_LENGTH)] p = WaterfallRenderer( index=spec_renderer.index, values=values, index_mapper=LinearMapper(range=obj.spectrum_plot.index_mapper.range), value_mapper=LinearMapper( range=DataRange1D(low=0, high=SPECTROGRAM_LENGTH) ), y2_mapper=LinearMapper( low_pos=0, high_pos=8, range=DataRange1D(low=0, high=15) ), ) spectrogram_plot = p obj.spectrogram_plot = p dummy = Plot() dummy.padding = 50 dummy.index_axis.mapper.range = p.index_mapper.range dummy.index_axis.title = ""Frequency (hz)"" dummy.add(p) container = HPlotContainer() container.add(obj.spectrum_plot) container.add(obj.time_plot) c2 = VPlotContainer() c2.add(dummy) c2.add(container) return c2 ","def _create_plot_component(obj): # Setup the spectrum plot frequencies = linspace(0.0, float(SAMPLING_RATE) / 2, num=NUM_SAMPLES / 2) obj.spectrum_data = ArrayPlotData(frequency=frequencies) empty_amplitude = zeros(NUM_SAMPLES / 2) obj.spectrum_data.set_data(""amplitude"", empty_amplitude) obj.spectrum_plot = Plot(obj.spectrum_data) spec_renderer = 
obj.spectrum_plot.plot( (""frequency"", ""amplitude""), name=""Spectrum"", color=""red"" )[0] obj.spectrum_plot.padding = 50 obj.spectrum_plot.title = ""Spectrum"" spec_range = list(obj.spectrum_plot.plots.values())[0][0].value_mapper.range spec_range.low = 0.0 spec_range.high = 5.0 obj.spectrum_plot.index_axis.title = ""Frequency (hz)"" obj.spectrum_plot.value_axis.title = ""Amplitude"" # Time Series plot times = linspace(0.0, float(NUM_SAMPLES) / SAMPLING_RATE, num=NUM_SAMPLES) obj.time_data = ArrayPlotData(time=times) empty_amplitude = zeros(NUM_SAMPLES) obj.time_data.set_data(""amplitude"", empty_amplitude) obj.time_plot = Plot(obj.time_data) obj.time_plot.plot((""time"", ""amplitude""), name=""Time"", color=""blue"") obj.time_plot.padding = 50 obj.time_plot.title = ""Time"" obj.time_plot.index_axis.title = ""Time (seconds)"" obj.time_plot.value_axis.title = ""Amplitude"" time_range = list(obj.time_plot.plots.values())[0][0].value_mapper.range time_range.low = -0.2 time_range.high = 0.2 # Spectrogram plot values = [zeros(NUM_SAMPLES / 2) for i in range(SPECTROGRAM_LENGTH)] p = WaterfallRenderer( index=spec_renderer.index, values=values, index_mapper=LinearMapper(range=obj.spectrum_plot.index_mapper.range), value_mapper=LinearMapper( range=DataRange1D(low=0, high=SPECTROGRAM_LENGTH) ), y2_mapper=LinearMapper( low_pos=0, high_pos=8, range=DataRange1D(low=0, high=15) ), ) spectrogram_plot = p obj.spectrogram_plot = p dummy = Plot() dummy.padding = 50 dummy.index_axis.mapper.range = p.index_mapper.range dummy.index_axis.title = ""Frequency (hz)"" dummy.add(p) container = HPlotContainer() container.add(obj.spectrum_plot) container.add(obj.time_plot) c2 = VPlotContainer() c2.add(dummy) c2.add(container) return c2 " 49130,"def compute_edge_weights(edge_ids, edge_probabilities, beta, threshold): """""" Convert edge probabilities to energies for the multicut problem. edge_ids: The list of edges in the graph. shape=(N, 2) edge_probabilities: 1-D, float (1.0 means edge is CUT, disconnecting the two SPs) beta: scalar (float) threshold: scalar (float), moves the 0 of the edge weights (default threshold = 0.5) Special behavior: If any node has ID 0, all of it's edges will be given an artificially low energy, to prevent it from merging with its neighbors, regardless of what the edge_probabilities say. """""" def rescale(probabilities, threshold): """""" Given a threshold in the range (0,1), rescales the probabilities below and above the threshold to the ranges (0,0.5], and (0.5,1) respectively. This is needed to implement an effective 'moving' of the 0 weight, since the multicut algorithm implicitly calculates that weights change sign at p=0.5. :param probabilities: 1d array (float). Probability data within range (0,1) :param threshold: scalar (float). The new threshold for the algorithm. :return: Rescaled data to be used in algorithm. 
"""""" out = np.zeros_like(probabilities) data_lower = probabilities[probabilities <= threshold] data_upper = probabilities[probabilities > threshold] data_lower = (data_lower / threshold) * 0.5 data_upper = (((data_upper - threshold) / (1 - threshold)) * 0.5) + 0.5 out[probabilities <= threshold] = data_lower out[probabilities > threshold] = data_upper return out p1 = edge_probabilities # P(Edge=CUT) p1 = np.clip(p1, 0.001, 0.999) p1 = rescale(p1, threshold) p0 = 1.0 - p1 # P(Edge=NOT CUT) edge_weights = np.log(p0 / p1) + np.log((1 - beta) / beta) # See note special behavior, above edges_touching_zero = edge_ids[:, 0] == 0 if edges_touching_zero.any(): logger.warning(""Volume contains label 0, which will be excluded from the segmentation."") MINIMUM_ENERGY = -1000.0 edge_weights[edges_touching_zero] = MINIMUM_ENERGY return edge_weights ","def compute_edge_weights(edge_ids, edge_probabilities, beta, threshold): """""" Convert edge probabilities to energies for the multicut problem. edge_ids: The list of edges in the graph. shape=(N, 2) edge_probabilities: 1-D, float (1.0 means edge is CUT, disconnecting the two SPs) beta: scalar (float) threshold: scalar (float), moves the 0 of the edge weights (default threshold = 0.5) Special behavior: If any node has ID 0, all of it's edges will be given an artificially low energy, to prevent it from merging with its neighbors, regardless of what the edge_probabilities say. """""" def rescale(probabilities, threshold): """""" Given a threshold in the range (0,1), rescales the probabilities below and above the threshold to the ranges (0,0.5], and (0.5,1) respectively. This is needed to implement an effective 'moving' of the 0 weight, since the multicut algorithm implicitly calculates that weights change sign at p=0.5. :param probabilities: 1d array (float). Probability data within range (0,1) :param threshold: scalar (float). The new threshold for the algorithm. :return: Rescaled data to be used in algorithm. """""" out = np.zeros_like(probabilities) data_lower = probabilities[probabilities <= threshold] data_upper = probabilities[probabilities > threshold] data_lower = (data_lower / threshold) * 0.5 data_upper = (((data_upper - threshold) / (1 - threshold)) * 0.5) + 0.5 out[probabilities <= threshold] = data_lower out[probabilities > threshold] = data_upper return out p1 = edge_probabilities # P(Edge=CUT) p1 = np.clip(p1, 0.001, 0.999) # Rescale [0; t] to [0; 0.5], and [t; 1] to [0.5; 1]. p1 = np.where(p1 <= threshold, p1 / (2 * threshold), 0.5 + (p1 - threshold) / (2 * (1 - threshold))) p0 = 1.0 - p1 # P(Edge=NOT CUT) edge_weights = np.log(p0 / p1) + np.log((1 - beta) / beta) # See note special behavior, above edges_touching_zero = edge_ids[:, 0] == 0 if edges_touching_zero.any(): logger.warning(""Volume contains label 0, which will be excluded from the segmentation."") MINIMUM_ENERGY = -1000.0 edge_weights[edges_touching_zero] = MINIMUM_ENERGY return edge_weights " 11834,"def truetype(font=None, size=10, index=0, encoding="""", layout_engine=None): """""" Load a TrueType or OpenType font from a file or file-like object, and create a font object. This function loads a font object from the given file or file-like object, and creates a font object for a font of the given size. Note that Pillow uses FreeType to open font files. If you are opening many fonts simultaneously on Windows, be aware that Windows limits the number of files that can be open in C at once to 512. 
If you approach that limit, an ``OSError`` may be thrown, reporting that FreeType ""cannot open resource"". This function requires the _imagingft service. :param font: A filename or file-like object containing a TrueType font. If the file is not found in this filename, the loader may also search in other directories, such as the :file:`fonts/` directory on Windows or :file:`/Library/Fonts/`, :file:`/System/Library/Fonts/` and :file:`~/Library/Fonts/` on macOS. :param size: The requested size, in points. :param index: Which font face to load (default is first available face). :param encoding: Which font encoding to use (default is Unicode). Common encodings are ""unic"" (Unicode), ""symb"" (Microsoft Symbol), ""ADOB"" (Adobe Standard), ""ADBE"" (Adobe Expert), and ""armn"" (Apple Roman). See the FreeType documentation for more information. :param layout_engine: Which layout engine to use, if available: `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`. :return: A font object. :exception IOError: If the file could not be read. """""" def freetype(font): return FreeTypeFont(font, size, index, encoding, layout_engine) try: return freetype(font) except IOError: if not isPath(font): raise ttf_filename = os.path.basename(font) dirs = [] if sys.platform == ""win32"": # check the windows font repository # NOTE: must use uppercase WINDIR, to work around bugs in # 1.5.2's os.environ.get() windir = os.environ.get(""WINDIR"") if windir: dirs.append(os.path.join(windir, ""fonts"")) elif sys.platform in (""linux"", ""linux2""): lindirs = os.environ.get(""XDG_DATA_DIRS"", """") if not lindirs: # According to the freedesktop spec, XDG_DATA_DIRS should # default to /usr/share lindirs = ""/usr/share"" dirs += [os.path.join(lindir, ""fonts"") for lindir in lindirs.split("":"")] elif sys.platform == ""darwin"": dirs += [ ""/Library/Fonts"", ""/System/Library/Fonts"", os.path.expanduser(""~/Library/Fonts""), ] ext = os.path.splitext(ttf_filename)[1] first_font_with_a_different_extension = None for directory in dirs: for walkroot, walkdir, walkfilenames in os.walk(directory): for walkfilename in walkfilenames: if ext and walkfilename == ttf_filename: return freetype(os.path.join(walkroot, walkfilename)) elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename: fontpath = os.path.join(walkroot, walkfilename) if os.path.splitext(fontpath)[1] == "".ttf"": return freetype(fontpath) if not ext and first_font_with_a_different_extension is None: first_font_with_a_different_extension = fontpath if first_font_with_a_different_extension: return freetype(first_font_with_a_different_extension) raise ","def truetype(font=None, size=10, index=0, encoding="""", layout_engine=None): """""" Load a TrueType or OpenType font from a file or file-like object, and create a font object. This function loads a font object from the given file or file-like object, and creates a font object for a font of the given size. Pillow uses FreeType to open font files. If you are opening many fonts simultaneously on Windows, be aware that Windows limits the number of files that can be open in C at once to 512. If you approach that limit, an ``OSError`` may be thrown, reporting that FreeType ""cannot open resource"". This function requires the _imagingft service. :param font: A filename or file-like object containing a TrueType font. 
If the file is not found in this filename, the loader may also search in other directories, such as the :file:`fonts/` directory on Windows or :file:`/Library/Fonts/`, :file:`/System/Library/Fonts/` and :file:`~/Library/Fonts/` on macOS. :param size: The requested size, in points. :param index: Which font face to load (default is first available face). :param encoding: Which font encoding to use (default is Unicode). Common encodings are ""unic"" (Unicode), ""symb"" (Microsoft Symbol), ""ADOB"" (Adobe Standard), ""ADBE"" (Adobe Expert), and ""armn"" (Apple Roman). See the FreeType documentation for more information. :param layout_engine: Which layout engine to use, if available: `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`. :return: A font object. :exception IOError: If the file could not be read. """""" def freetype(font): return FreeTypeFont(font, size, index, encoding, layout_engine) try: return freetype(font) except IOError: if not isPath(font): raise ttf_filename = os.path.basename(font) dirs = [] if sys.platform == ""win32"": # check the windows font repository # NOTE: must use uppercase WINDIR, to work around bugs in # 1.5.2's os.environ.get() windir = os.environ.get(""WINDIR"") if windir: dirs.append(os.path.join(windir, ""fonts"")) elif sys.platform in (""linux"", ""linux2""): lindirs = os.environ.get(""XDG_DATA_DIRS"", """") if not lindirs: # According to the freedesktop spec, XDG_DATA_DIRS should # default to /usr/share lindirs = ""/usr/share"" dirs += [os.path.join(lindir, ""fonts"") for lindir in lindirs.split("":"")] elif sys.platform == ""darwin"": dirs += [ ""/Library/Fonts"", ""/System/Library/Fonts"", os.path.expanduser(""~/Library/Fonts""), ] ext = os.path.splitext(ttf_filename)[1] first_font_with_a_different_extension = None for directory in dirs: for walkroot, walkdir, walkfilenames in os.walk(directory): for walkfilename in walkfilenames: if ext and walkfilename == ttf_filename: return freetype(os.path.join(walkroot, walkfilename)) elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename: fontpath = os.path.join(walkroot, walkfilename) if os.path.splitext(fontpath)[1] == "".ttf"": return freetype(fontpath) if not ext and first_font_with_a_different_extension is None: first_font_with_a_different_extension = fontpath if first_font_with_a_different_extension: return freetype(first_font_with_a_different_extension) raise " 35760,"def main(args): utils.setup_ddp(args) args.test_only = args.train_dataset is None if args.distributed and args.device == ""cpu"": raise ValueError(""The device must be cuda if we want to run in distributed mode using torchrun"") device = torch.device(args.device) if args.use_deterministic_algorithms: torch.backends.cudnn.benchmark = False torch.use_deterministic_algorithms(True) else: torch.backends.cudnn.benchmark = True model = torchvision.models.optical_flow.__dict__[args.model](weights=args.weights) if args.distributed: model = model.to(args.local_rank) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank]) model_without_ddp = model.module else: model.to(device) model_without_ddp = model if args.resume is not None: checkpoint = torch.load(args.resume, map_location=""cpu"") model_without_ddp.load_state_dict(checkpoint[""model""]) if args.test_only: # Set deterministic CUDNN algorithms, since they can affect epe a fair bit. 
torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True evaluate(model, args) return print(f""Parameter Count: {sum(p.numel() for p in model.parameters() if p.requires_grad)}"") train_dataset = get_train_dataset(args.train_dataset, args.dataset_root) optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, eps=args.adamw_eps) scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer=optimizer, max_lr=args.lr, epochs=args.epochs, steps_per_epoch=ceil(len(train_dataset) / (args.world_size * args.batch_size)), pct_start=0.05, cycle_momentum=False, anneal_strategy=""linear"", ) if args.resume is not None: optimizer.load_state_dict(checkpoint[""optimizer""]) scheduler.load_state_dict(checkpoint[""scheduler""]) args.start_epoch = checkpoint[""epoch""] + 1 else: args.start_epoch = 0 torch.backends.cudnn.benchmark = True model.train() if args.freeze_batch_norm: utils.freeze_batch_norm(model.module) if args.distributed: sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True, drop_last=True) else: sampler = torch.utils.data.RandomSampler(train_dataset) train_loader = torch.utils.data.DataLoader( train_dataset, sampler=sampler, batch_size=args.batch_size, pin_memory=True, num_workers=args.workers, ) logger = utils.MetricLogger() done = False for epoch in range(args.start_epoch, args.epochs): print(f""EPOCH {epoch}"") if args.distributed: # needed on distributed mode, otherwise the data loading order would be the same for all epochs sampler.set_epoch(epoch) train_one_epoch( model=model, optimizer=optimizer, scheduler=scheduler, train_loader=train_loader, logger=logger, args=args, ) # Note: we don't sync the SmoothedValues across processes, so the printed metrics are just those of rank 0 print(f""Epoch {epoch} done. "", logger) if not args.distributed or args.rank == 0: checkpoint = { ""model"": model_without_ddp.state_dict(), ""optimizer"": optimizer.state_dict(), ""scheduler"": scheduler.state_dict(), ""epoch"": epoch, ""args"": args, } torch.save(checkpoint, Path(args.output_dir) / f""{args.name}_{epoch}.pth"") torch.save(checkpoint, Path(args.output_dir) / f""{args.name}.pth"") if epoch % args.val_freq == 0 or done: evaluate(model, args) model.train() if args.freeze_batch_norm: utils.freeze_batch_norm(model.module) ","def main(args): utils.setup_ddp(args) args.test_only = args.train_dataset is None if args.distributed and args.device == ""cpu"": raise ValueError(""The device must be cuda if we want to run in distributed mode using torchrun"") device = torch.device(args.device) if args.use_deterministic_algorithms or args.test_only: torch.backends.cudnn.benchmark = False torch.use_deterministic_algorithms(True) else: torch.backends.cudnn.benchmark = True model = torchvision.models.optical_flow.__dict__[args.model](weights=args.weights) if args.distributed: model = model.to(args.local_rank) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank]) model_without_ddp = model.module else: model.to(device) model_without_ddp = model if args.resume is not None: checkpoint = torch.load(args.resume, map_location=""cpu"") model_without_ddp.load_state_dict(checkpoint[""model""]) if args.test_only: # Set deterministic CUDNN algorithms, since they can affect epe a fair bit. 
torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True evaluate(model, args) return print(f""Parameter Count: {sum(p.numel() for p in model.parameters() if p.requires_grad)}"") train_dataset = get_train_dataset(args.train_dataset, args.dataset_root) optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, eps=args.adamw_eps) scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer=optimizer, max_lr=args.lr, epochs=args.epochs, steps_per_epoch=ceil(len(train_dataset) / (args.world_size * args.batch_size)), pct_start=0.05, cycle_momentum=False, anneal_strategy=""linear"", ) if args.resume is not None: optimizer.load_state_dict(checkpoint[""optimizer""]) scheduler.load_state_dict(checkpoint[""scheduler""]) args.start_epoch = checkpoint[""epoch""] + 1 else: args.start_epoch = 0 torch.backends.cudnn.benchmark = True model.train() if args.freeze_batch_norm: utils.freeze_batch_norm(model.module) if args.distributed: sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True, drop_last=True) else: sampler = torch.utils.data.RandomSampler(train_dataset) train_loader = torch.utils.data.DataLoader( train_dataset, sampler=sampler, batch_size=args.batch_size, pin_memory=True, num_workers=args.workers, ) logger = utils.MetricLogger() done = False for epoch in range(args.start_epoch, args.epochs): print(f""EPOCH {epoch}"") if args.distributed: # needed on distributed mode, otherwise the data loading order would be the same for all epochs sampler.set_epoch(epoch) train_one_epoch( model=model, optimizer=optimizer, scheduler=scheduler, train_loader=train_loader, logger=logger, args=args, ) # Note: we don't sync the SmoothedValues across processes, so the printed metrics are just those of rank 0 print(f""Epoch {epoch} done. "", logger) if not args.distributed or args.rank == 0: checkpoint = { ""model"": model_without_ddp.state_dict(), ""optimizer"": optimizer.state_dict(), ""scheduler"": scheduler.state_dict(), ""epoch"": epoch, ""args"": args, } torch.save(checkpoint, Path(args.output_dir) / f""{args.name}_{epoch}.pth"") torch.save(checkpoint, Path(args.output_dir) / f""{args.name}.pth"") if epoch % args.val_freq == 0 or done: evaluate(model, args) model.train() if args.freeze_batch_norm: utils.freeze_batch_norm(model.module) " 5735,"def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False, debug=None, check_finite=True, assume_a='gen', transposed=False): """""" Solves the linear equation set ``ax = b`` for the unknown ``x`` for square ``a`` matrix. If the data matrix is known to be a particular type then supplying the corresponding string to ``assume_a`` key chooses the dedicated solver. The available options are =================== ======== generic matrix 'gen' symmetric 'sym' hermitian 'her' positive definite 'pos' =================== ======== If omitted, ``'gen'`` is the default structure. The datatype of the arrays define which solver is called regardless of the values. In other words, even when the complex array entries have precisely zero imaginary parts, the complex solver will be called based on the data type of the array. Parameters ---------- a : (N, N) array_like Square input data b : (N, NRHS) array_like Input data for the right hand side. sym_pos : bool, optional Assume `a` is symmetric and positive definite. This key is deprecated and ``assume_a='pos'`` is recommended instead. The functionality is the same. It will be removed in the future. Default is False. 
lower : bool, optional If True, the calulation is based only on the data in the lower triangle of matrix `a` instead of data in the upper triangle. Ignored if ``assume_a == 'gen'``. Default is False. overwrite_a : bool, optional Allow overwriting data in `a` (may enhance performance). Default is False. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance). Default is False. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default is True. assume_a : str, optional Valid entries are explained above. Default is ``'gen'``. transposed : bool, optional If True, calculate ``a^T x = b``. Only implemented for real matrices `a`. Default is False. Returns ------- x : (N, NRHS) ndarray The solution array. Raises ------ ValueError If size mismatches detected or input a is not square. LinAlgError If the matrix is singular. LinAlgWarning If an ill-conditioned input a is detected. NotImplementedError If transposed is True and input a is a complex matrix. Examples -------- Given `a` and `b`, solve for `x`: >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]]) >>> b = np.array([2, 4, -1]) >>> from scipy import linalg >>> x = linalg.solve(a, b) >>> x array([ 2., -2., 9.]) >>> np.dot(a, x) == b array([ True, True, True], dtype=bool) Notes ----- If the input b matrix is a 1-D array with N elements, when supplied together with an NxN input a, it is assumed as a valid column vector despite the apparent size mismatch. This is compatible with the numpy.dot() behavior and the returned result is still 1-D array. The generic, symmetric, Hermitian and positive definite solutions are obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of LAPACK respectively. """""" # Flags for 1-D or N-D right-hand side b_is_1D = False a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite)) b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite)) n = a1.shape[0] overwrite_a = overwrite_a or _datacopied(a1, a) overwrite_b = overwrite_b or _datacopied(b1, b) if a1.shape[0] != a1.shape[1]: raise ValueError('Input a needs to be a square matrix.') if n != b1.shape[0]: # Last chance to catch 1x1 scalar a and 1-D b arrays if not (n == 1 and b1.size != 0): raise ValueError('Input b has to have same number of rows as ' 'input a') # accommodate empty arrays if b1.size == 0: return np.asfortranarray(b1.copy()) # regularize 1-D b arrays to 2D if b1.ndim == 1: if n == 1: b1 = b1[None, :] else: b1 = b1[:, None] b_is_1D = True # Backwards compatibility - old keyword. if sym_pos: assume_a = 'pos' if assume_a not in ('gen', 'sym', 'her', 'pos'): raise ValueError('{} is not a recognized matrix structure' ''.format(assume_a)) # for a real matrix, describe it as ""symmetric"", not ""hermitian"" # (lapack doesn't know what to do with real hermitian matrices) if assume_a == 'her' and not np.iscomplexobj(a1): assume_a = 'sym' # Deprecate keyword ""debug"" if debug is not None: warn('Use of the ""debug"" keyword is deprecated ' 'and this keyword will be removed in future ' 'versions of SciPy.', DeprecationWarning, stacklevel=2) # Get the correct lamch function. # The LAMCH functions only exists for S and D # So for complex values we have to convert to real/double. 
if a1.dtype.char in 'fF': # single precision lamch = get_lapack_funcs('lamch', dtype='f') else: lamch = get_lapack_funcs('lamch', dtype='d') # Currently we do not have the other forms of the norm calculators # lansy, lanpo, lanhe. # However, in any case they only reduce computations slightly... lange = get_lapack_funcs('lange', (a1,)) # Since the I-norm and 1-norm are the same for symmetric matrices # we can collect them all in this one call # Note however, that when issuing 'gen' and form!='none', then # the I-norm should be used if transposed: trans = 1 norm = 'I' if np.iscomplexobj(a1): raise NotImplementedError('scipy.linalg.solve can currently ' 'not solve a^T x = b or a^H x = b ' 'for complex matrices.') else: trans = 0 norm = '1' anorm = lange(norm, a1) # Generalized case 'gesv' if assume_a == 'gen': gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'), (a1, b1)) lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a) _solve_check(n, info) x, info = getrs(lu, ipvt, b1, trans=trans, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = gecon(lu, anorm, norm=norm) # Hermitian case 'hesv' elif assume_a == 'her': hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv', 'hesv_lwork'), (a1, b1)) lwork = _compute_lwork(hesv_lw, n, lower) lu, ipvt, x, info = hesv(a1, b1, lwork=lwork, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = hecon(lu, ipvt, anorm) # Symmetric case 'sysv' elif assume_a == 'sym': sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv', 'sysv_lwork'), (a1, b1)) lwork = _compute_lwork(sysv_lw, n, lower) lu, ipvt, x, info = sysv(a1, b1, lwork=lwork, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = sycon(lu, ipvt, anorm) # Positive definite case 'posv' else: pocon, posv = get_lapack_funcs(('pocon', 'posv'), (a1, b1)) lu, x, info = posv(a1, b1, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = pocon(lu, anorm) _solve_check(n, info, lamch, rcond) if b_is_1D: x = x.ravel() return x ","def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False, debug=None, check_finite=True, assume_a='gen', transposed=False): """""" Solves the linear equation set ``a@x = b`` for the unknown ``x`` for square ``a`` matrix. If the data matrix is known to be a particular type then supplying the corresponding string to ``assume_a`` key chooses the dedicated solver. The available options are =================== ======== generic matrix 'gen' symmetric 'sym' hermitian 'her' positive definite 'pos' =================== ======== If omitted, ``'gen'`` is the default structure. The datatype of the arrays define which solver is called regardless of the values. In other words, even when the complex array entries have precisely zero imaginary parts, the complex solver will be called based on the data type of the array. Parameters ---------- a : (N, N) array_like Square input data b : (N, NRHS) array_like Input data for the right hand side. sym_pos : bool, optional Assume `a` is symmetric and positive definite. This key is deprecated and ``assume_a='pos'`` is recommended instead. The functionality is the same. It will be removed in the future. Default is False. lower : bool, optional If True, the calulation is based only on the data in the lower triangle of matrix `a` instead of data in the upper triangle. Ignored if ``assume_a == 'gen'``. Default is False. 
overwrite_a : bool, optional Allow overwriting data in `a` (may enhance performance). Default is False. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance). Default is False. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default is True. assume_a : str, optional Valid entries are explained above. Default is ``'gen'``. transposed : bool, optional If True, calculate ``a^T x = b``. Only implemented for real matrices `a`. Default is False. Returns ------- x : (N, NRHS) ndarray The solution array. Raises ------ ValueError If size mismatches detected or input a is not square. LinAlgError If the matrix is singular. LinAlgWarning If an ill-conditioned input a is detected. NotImplementedError If transposed is True and input a is a complex matrix. Examples -------- Given `a` and `b`, solve for `x`: >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]]) >>> b = np.array([2, 4, -1]) >>> from scipy import linalg >>> x = linalg.solve(a, b) >>> x array([ 2., -2., 9.]) >>> np.dot(a, x) == b array([ True, True, True], dtype=bool) Notes ----- If the input b matrix is a 1-D array with N elements, when supplied together with an NxN input a, it is assumed as a valid column vector despite the apparent size mismatch. This is compatible with the numpy.dot() behavior and the returned result is still 1-D array. The generic, symmetric, Hermitian and positive definite solutions are obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of LAPACK respectively. """""" # Flags for 1-D or N-D right-hand side b_is_1D = False a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite)) b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite)) n = a1.shape[0] overwrite_a = overwrite_a or _datacopied(a1, a) overwrite_b = overwrite_b or _datacopied(b1, b) if a1.shape[0] != a1.shape[1]: raise ValueError('Input a needs to be a square matrix.') if n != b1.shape[0]: # Last chance to catch 1x1 scalar a and 1-D b arrays if not (n == 1 and b1.size != 0): raise ValueError('Input b has to have same number of rows as ' 'input a') # accommodate empty arrays if b1.size == 0: return np.asfortranarray(b1.copy()) # regularize 1-D b arrays to 2D if b1.ndim == 1: if n == 1: b1 = b1[None, :] else: b1 = b1[:, None] b_is_1D = True # Backwards compatibility - old keyword. if sym_pos: assume_a = 'pos' if assume_a not in ('gen', 'sym', 'her', 'pos'): raise ValueError('{} is not a recognized matrix structure' ''.format(assume_a)) # for a real matrix, describe it as ""symmetric"", not ""hermitian"" # (lapack doesn't know what to do with real hermitian matrices) if assume_a == 'her' and not np.iscomplexobj(a1): assume_a = 'sym' # Deprecate keyword ""debug"" if debug is not None: warn('Use of the ""debug"" keyword is deprecated ' 'and this keyword will be removed in future ' 'versions of SciPy.', DeprecationWarning, stacklevel=2) # Get the correct lamch function. # The LAMCH functions only exists for S and D # So for complex values we have to convert to real/double. if a1.dtype.char in 'fF': # single precision lamch = get_lapack_funcs('lamch', dtype='f') else: lamch = get_lapack_funcs('lamch', dtype='d') # Currently we do not have the other forms of the norm calculators # lansy, lanpo, lanhe. # However, in any case they only reduce computations slightly... 
lange = get_lapack_funcs('lange', (a1,)) # Since the I-norm and 1-norm are the same for symmetric matrices # we can collect them all in this one call # Note however, that when issuing 'gen' and form!='none', then # the I-norm should be used if transposed: trans = 1 norm = 'I' if np.iscomplexobj(a1): raise NotImplementedError('scipy.linalg.solve can currently ' 'not solve a^T x = b or a^H x = b ' 'for complex matrices.') else: trans = 0 norm = '1' anorm = lange(norm, a1) # Generalized case 'gesv' if assume_a == 'gen': gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'), (a1, b1)) lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a) _solve_check(n, info) x, info = getrs(lu, ipvt, b1, trans=trans, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = gecon(lu, anorm, norm=norm) # Hermitian case 'hesv' elif assume_a == 'her': hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv', 'hesv_lwork'), (a1, b1)) lwork = _compute_lwork(hesv_lw, n, lower) lu, ipvt, x, info = hesv(a1, b1, lwork=lwork, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = hecon(lu, ipvt, anorm) # Symmetric case 'sysv' elif assume_a == 'sym': sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv', 'sysv_lwork'), (a1, b1)) lwork = _compute_lwork(sysv_lw, n, lower) lu, ipvt, x, info = sysv(a1, b1, lwork=lwork, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = sycon(lu, ipvt, anorm) # Positive definite case 'posv' else: pocon, posv = get_lapack_funcs(('pocon', 'posv'), (a1, b1)) lu, x, info = posv(a1, b1, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = pocon(lu, anorm) _solve_check(n, info, lamch, rcond) if b_is_1D: x = x.ravel() return x " 36522,"def collect_gdb(info_add): import subprocess try: proc = subprocess.Popen([""gdb"", ""-nx"", ""--version""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) version = proc.communicate()[0] if proc.returncode: # ignore gdb failure: test_gdb will log the error return except OSError: return # Only keep the first line version = version.splitlines()[0] info_add('gdb_version', version) ","def collect_gdb(info_add): import subprocess try: proc = subprocess.Popen([""gdb"", ""-nx"", ""--version""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) version = proc.communicate()[0] if proc.returncode != 0: # ignore gdb failure: test_gdb will log the error return except OSError: return # Only keep the first line version = version.splitlines()[0] info_add('gdb_version', version) " 59194,"def _run_pip(args, additional_paths=None): # Add our bundled software to the sys.path so we can import it if additional_paths is not None: sys.path = additional_paths + sys.path # Invoke pip as if it's the main module, and catch the exit. backup_argv = sys.argv[:] sys.argv[1:] = args try: runpy.run_module(""pip"", run_name=""__main__"", alter_sys=True) except SystemExit as e: return e.code finally: sys.argv[:] = backup_argv raise SystemError(""pip have not exited, that should never happen"") ","def _run_pip(args, additional_paths=None): # Add our bundled software to the sys.path so we can import it if additional_paths is not None: sys.path = additional_paths + sys.path # Invoke pip as if it's the main module, and catch the exit. 
backup_argv = sys.argv[:] sys.argv[1:] = args try: runpy.run_module(""pip"", run_name=""__main__"", alter_sys=True) except SystemExit as e: return e.code finally: sys.argv[:] = backup_argv # run_module() didn't raise SystemExit: success return 0 " 50336,"def spam_checking_helper(community=None, deposit=None): """"""Checks community/deposit metadata for spam."""""" try: if current_app.config.get('ZENODO_SPAM_MODEL_LOCATION'): if community: task = check_metadata_for_spam.delay( community_id=community.id) if deposit: task = check_metadata_for_spam.delay(dep_id=str(deposit.id)) spam_proba = task.get(timeout=current_app.config[ 'ZENODO_SPAM_CHECK_TIMEOUT']) else: spam_proba = 0 if spam_proba > current_app.config['ZENODO_SPAM_THRESHOLD']: if not Permission(ActionNeed('admin-access')).can(): rs = RecordsSearch(index='records').query( Q('query_string', query=""owners:{}"".format( community.id_user))) if not (rs.count() or Community.query.filter_by( id_user=community.id_user).count()): current_app.config['ZENODO_SPAM_HANDLING_ACTIONS']( community=community, deposit=deposit) except Exception: current_app.logger.exception(u'Could not check for spam') if community: flash(""Community was successfully created."", category='success') ","def spam_checking_helper(community=None, deposit=None): """"""Checks community/deposit metadata for spam."""""" try: if current_app.config.get('ZENODO_SPAM_MODEL_LOCATION'): if community: task = check_metadata_for_spam.delay( community_id=community.id) if deposit: task = check_metadata_for_spam.delay(dep_id=str(deposit.id)) spam_proba = task.get(timeout=current_app.config[ 'ZENODO_SPAM_CHECK_TIMEOUT']) else: spam_proba = 0 if spam_proba > current_app.config['ZENODO_SPAM_THRESHOLD']: if not Permission(ActionNeed('admin-access')).can(): has_records = rs = RecordsSearch(index='records').query( Q('query_string', query=""owners:{}"".format( community.id_user))).count() has_communities = Community.query.filter_by( id_user=community.id_user).count() if not (has_records or has_communities): current_app.config['ZENODO_SPAM_HANDLING_ACTIONS']( community=community, deposit=deposit) except Exception: current_app.logger.exception(u'Could not check for spam') if community: flash(""Community was successfully created."", category='success') " 54859,"def load_partition(part_config, part_id, load_feats=True): ''' Load data of a partition from the data path. A partition data includes a graph structure of the partition, a dict of node tensors, a dict of edge tensors and some metadata. The partition may contain the HALO nodes, which are the nodes replicated from other partitions. However, the dict of node tensors only contains the node data that belongs to the local partition. Similarly, edge tensors only contains the edge data that belongs to the local partition. The metadata include the information of the global graph (not the local partition), which includes the number of nodes, the number of edges as well as the node assignment of the global graph. The function currently loads data through the local filesystem interface. Parameters ---------- part_config : str The path of the partition config file. part_id : int The partition ID. load_feats : bool, optional Whether to load node/edge feats. Default: True. Returns ------- DGLGraph The graph partition structure. Dict[str, Tensor] Node features. Dict[str, Tensor] Edge features. GraphPartitionBook The graph partition information. 
str The graph name List[str] The node types List[str] The edge types ''' config_path = os.path.dirname(part_config) relative_to_config = lambda path: os.path.join(config_path, path) with open(part_config) as conf_f: part_metadata = json.load(conf_f) assert 'part-{}'.format(part_id) in part_metadata, ""part-{} does not exist"".format(part_id) part_files = part_metadata['part-{}'.format(part_id)] assert 'part_graph' in part_files, ""the partition does not contain graph structure."" graph = load_graphs(relative_to_config(part_files['part_graph']))[0][0] assert NID in graph.ndata, ""the partition graph should contain node mapping to global node ID"" assert EID in graph.edata, ""the partition graph should contain edge mapping to global edge ID"" gpb, graph_name, ntypes, etypes = load_partition_book(part_config, part_id, graph) ntypes_list, etypes_list = [], [] for ntype in ntypes: ntype_id = ntypes[ntype] # graph.ndata[NID] are global homogeneous node IDs. nids = F.boolean_mask(graph.ndata[NID], _get_inner_node_mask(graph, ntype_id)) partids1 = gpb.nid2partid(nids) _, per_type_nids = gpb.map_to_per_ntype(nids) partids2 = gpb.nid2partid(per_type_nids, ntype) assert np.all(F.asnumpy(partids1 == part_id)), 'load a wrong partition' assert np.all(F.asnumpy(partids2 == part_id)), 'load a wrong partition' ntypes_list.append(ntype) for etype in etypes: etype_id = etypes[etype] # graph.edata[EID] are global homogeneous edge IDs. eids = F.boolean_mask(graph.edata[EID], _get_inner_edge_mask(graph, etype_id)) partids1 = gpb.eid2partid(eids) _, per_type_eids = gpb.map_to_per_etype(eids) partids2 = gpb.eid2partid(per_type_eids, etype) assert np.all(F.asnumpy(partids1 == part_id)), 'load a wrong partition' assert np.all(F.asnumpy(partids2 == part_id)), 'load a wrong partition' etypes_list.append(etype) node_feats = {} edge_feats = {} if load_feats: node_feats, edge_feats = load_partition_feats(part_config, part_id) return graph, node_feats, edge_feats, gpb, graph_name, ntypes_list, etypes_list ","def load_partition(part_config, part_id, load_feats=True): ''' Load data of a partition from the data path. A partition data includes a graph structure of the partition, a dict of node tensors, a dict of edge tensors and some metadata. The partition may contain the HALO nodes, which are the nodes replicated from other partitions. However, the dict of node tensors only contains the node data that belongs to the local partition. Similarly, edge tensors only contains the edge data that belongs to the local partition. The metadata include the information of the global graph (not the local partition), which includes the number of nodes, the number of edges as well as the node assignment of the global graph. The function currently loads data through the local filesystem interface. Parameters ---------- part_config : str The path of the partition config file. part_id : int The partition ID. load_feats : bool, optional Whether to load node/edge feats. If False, the returned node/edge feature dictionaries will be empty. Default: True. Returns ------- DGLGraph The graph partition structure. Dict[str, Tensor] Node features. Dict[str, Tensor] Edge features. GraphPartitionBook The graph partition information. 
str The graph name List[str] The node types List[str] The edge types ''' config_path = os.path.dirname(part_config) relative_to_config = lambda path: os.path.join(config_path, path) with open(part_config) as conf_f: part_metadata = json.load(conf_f) assert 'part-{}'.format(part_id) in part_metadata, ""part-{} does not exist"".format(part_id) part_files = part_metadata['part-{}'.format(part_id)] assert 'part_graph' in part_files, ""the partition does not contain graph structure."" graph = load_graphs(relative_to_config(part_files['part_graph']))[0][0] assert NID in graph.ndata, ""the partition graph should contain node mapping to global node ID"" assert EID in graph.edata, ""the partition graph should contain edge mapping to global edge ID"" gpb, graph_name, ntypes, etypes = load_partition_book(part_config, part_id, graph) ntypes_list, etypes_list = [], [] for ntype in ntypes: ntype_id = ntypes[ntype] # graph.ndata[NID] are global homogeneous node IDs. nids = F.boolean_mask(graph.ndata[NID], _get_inner_node_mask(graph, ntype_id)) partids1 = gpb.nid2partid(nids) _, per_type_nids = gpb.map_to_per_ntype(nids) partids2 = gpb.nid2partid(per_type_nids, ntype) assert np.all(F.asnumpy(partids1 == part_id)), 'load a wrong partition' assert np.all(F.asnumpy(partids2 == part_id)), 'load a wrong partition' ntypes_list.append(ntype) for etype in etypes: etype_id = etypes[etype] # graph.edata[EID] are global homogeneous edge IDs. eids = F.boolean_mask(graph.edata[EID], _get_inner_edge_mask(graph, etype_id)) partids1 = gpb.eid2partid(eids) _, per_type_eids = gpb.map_to_per_etype(eids) partids2 = gpb.eid2partid(per_type_eids, etype) assert np.all(F.asnumpy(partids1 == part_id)), 'load a wrong partition' assert np.all(F.asnumpy(partids2 == part_id)), 'load a wrong partition' etypes_list.append(etype) node_feats = {} edge_feats = {} if load_feats: node_feats, edge_feats = load_partition_feats(part_config, part_id) return graph, node_feats, edge_feats, gpb, graph_name, ntypes_list, etypes_list " 8170,"def _convert_pixels(edge): """""" Helper function to convert a list of edge pixels of the form [(x0, y0), (x1, y1),..., (xn,yn)] into the form ([x0, x1,...,xn], [y0, y1,...,yn]) Parameter --------- edge : ~list A list of pairs of tuples. Returns ------- pixel_edge : ~tuple A tuple containing two lists. The first entry is a list containing the first coordinate of each pixel, and similarly for the second entry. """""" x = [p[0] for p in edge.value] * u.pix y = [p[1] for p in edge.value] * u.pix return x, y ","def _convert_pixels(edge): """""" Helper function to convert a list of edge pixels of the form [(x0, y0), (x1, y1),..., (xn,yn)] into the form ([x0, x1,...,xn], [y0, y1,...,yn]) Parameter --------- edge : list A list of pairs of tuples. Returns ------- pixel_edge : ~tuple A tuple containing two lists. The first entry is a list containing the first coordinate of each pixel, and similarly for the second entry. 
"""""" x = [p[0] for p in edge.value] * u.pix y = [p[1] for p in edge.value] * u.pix return x, y " 49299,"def display_for_field(value, field, empty_value_display): from django.contrib.admin.templatetags.admin_list import _boolean_icon if getattr(field, ""flatchoices"", None): flatchoices = field.flatchoices if not isinstance(flatchoices, abc.Hashable) or not isinstance( value, abc.Hashable ): flatchoices = make_hashable(flatchoices) value = make_hashable(value) return dict(flatchoices).get(value, empty_value_display) # BooleanField needs special-case null-handling, so it comes before the # general null test. elif isinstance(field, models.BooleanField): return _boolean_icon(value) elif value is None: return empty_value_display elif isinstance(field, models.DateTimeField): return formats.localize(timezone.template_localtime(value)) elif isinstance(field, (models.DateField, models.TimeField)): return formats.localize(value) elif isinstance(field, models.DecimalField): return formats.number_format(value, field.decimal_places) elif isinstance(field, (models.IntegerField, models.FloatField)): return formats.number_format(value) elif isinstance(field, models.FileField) and value: return format_html('{}', value.url, value) elif isinstance(field, models.JSONField) and value: try: return json.dumps(value, ensure_ascii=False, cls=field.encoder) except TypeError: return display_for_value(value, empty_value_display) else: return display_for_value(value, empty_value_display) ","def display_for_field(value, field, empty_value_display): from django.contrib.admin.templatetags.admin_list import _boolean_icon if getattr(field, ""flatchoices"", None): flatchoices = field.flatchoices try: return dict(flatchoices).get(value, empty_value_display) except TypeError: flatchoices = make_hashable(flatchoices) value = make_hashable(value) return dict(flatchoices).get(value, empty_value_display) # BooleanField needs special-case null-handling, so it comes before the # general null test. elif isinstance(field, models.BooleanField): return _boolean_icon(value) elif value is None: return empty_value_display elif isinstance(field, models.DateTimeField): return formats.localize(timezone.template_localtime(value)) elif isinstance(field, (models.DateField, models.TimeField)): return formats.localize(value) elif isinstance(field, models.DecimalField): return formats.number_format(value, field.decimal_places) elif isinstance(field, (models.IntegerField, models.FloatField)): return formats.number_format(value) elif isinstance(field, models.FileField) and value: return format_html('{}', value.url, value) elif isinstance(field, models.JSONField) and value: try: return json.dumps(value, ensure_ascii=False, cls=field.encoder) except TypeError: return display_for_value(value, empty_value_display) else: return display_for_value(value, empty_value_display) " 24354,"def should_bypass_proxy(url, no_proxy_uris): # Accepts a URL and a list of no_proxy URIs # Returns True if URL should bypass the proxy. parsed_uri = urlparse(url).hostname if '*' in no_proxy_uris: # A single * character is supported, which matches all hosts, and effectively disables the proxy. # See: https://curl.haxx.se/libcurl/c/CURLOPT_NOPROXY.html return True for no_proxy_uri in no_proxy_uris: try: # If no_proxy_uri is an IP or IP CIDR. # A ValueError is raised if address does not represent a valid IPv4 or IPv6 address. 
ipnetwork = ip_network(ensure_unicode(no_proxy_uri)) ipaddress = ip_address(ensure_unicode(parsed_uri)) if ipaddress in ipnetwork: return True except ValueError: # Treat no_proxy_uri as a domain name # A domain name matches that name and all subdomains. # e.g. ""foo.com"" matches ""foo.com"" and ""bar.foo.com"" # A domain name with a leading ""."" matches subdomains only. # e.g. "".y.com"" matches ""x.y.com"" but not ""y.com"". if no_proxy_uri.startswith((""."", ""*."")): # Support wildcard subdomain; treat as leading dot ""."" # e.g. ""*.example.domain"" as "".example.domain"" dot_no_proxy_uri = no_proxy_uri.strip(""*"") else: # Used for matching subdomains. dot_no_proxy_uri = "".{}"".format(no_proxy_uri) if no_proxy_uri == parsed_uri or parsed_uri.endswith(dot_no_proxy_uri): return True return False ","def should_bypass_proxy(url, no_proxy_uris): # Accepts a URL and a list of no_proxy URIs # Returns True if URL should bypass the proxy. parsed_uri = urlparse(url).hostname if '*' in no_proxy_uris: # A single * character is supported, which matches all hosts, and effectively disables the proxy. # See: https://curl.haxx.se/libcurl/c/CURLOPT_NOPROXY.html return True for no_proxy_uri in no_proxy_uris: try: # If no_proxy_uri is an IP or IP CIDR. # A ValueError is raised if address does not represent a valid IPv4 or IPv6 address. ipnetwork = ip_network(ensure_unicode(no_proxy_uri)) ipaddress = ip_address(ensure_unicode(parsed_uri)) if ipaddress in ipnetwork: return True except ValueError: # Treat no_proxy_uri as a domain name # A domain name matches that name and all subdomains. # e.g. ""foo.com"" matches ""foo.com"" and ""bar.foo.com"" # A domain name with a leading ""."" matches subdomains only. # e.g. "".y.com"" matches ""x.y.com"" but not ""y.com"". if no_proxy_uri.startswith((""."", ""*."")): # Support wildcard subdomain; treat as leading dot ""."" # e.g. ""*.example.domain"" as "".example.domain"" dot_no_proxy_uri = no_proxy_uri.lstrip(""*"") else: # Used for matching subdomains. dot_no_proxy_uri = "".{}"".format(no_proxy_uri) if no_proxy_uri == parsed_uri or parsed_uri.endswith(dot_no_proxy_uri): return True return False " 32190,"def get_global_counters(topology: Topology, device_filter_string: str = None) -> ShowCounterGlobalCommmandResult: """""" Gets global counter information from all the PAN-OS firewalls in the topology :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only show specific hostnames or serial numbers. """""" result: ShowCounterGlobalCommmandResult = FirewallCommand.get_counter_global(topology, device_filter_string) return result ","def get_global_counters(topology: Topology, device_filter_string: str = None) -> ShowCounterGlobalCommmandResult: """""" Gets global counter information from all the PAN-OS firewalls in the topology :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only show specific hostnames or serial numbers. """""" return FirewallCommand.get_counter_global(topology, device_filter_string) " 46175,"def is_colinear(points): """"""Determines is a list of 2D points are colinear Parameters ------- points : np.ndarray Nx2 array of points to be tested for colinearity Returns ------- val : bool True is all points are colinear, False otherwise. 
"""""" if len(points) < 3: return True for p in points[2:]: if orientation(points[0], points[1], p) != 0: return False return True ","def is_colinear(points): """"""Determines is a list of 2D points are colinear Parameters ------- points : np.ndarray Points to be tested for colinearity Returns ------- val : bool True is all points are colinear, False otherwise. """""" if len(points) < 3: return True for p in points[2:]: if orientation(points[0], points[1], p) != 0: return False return True " 19999,"def test_plantcv_transform_find_color_card_bad_thresh_input(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG)) with pytest.raises(RuntimeError): pcv.params.debug = None _, _, _ = pcv.transform.find_color_card(img=rgb_img, threshold='gaussian') ","def test_plantcv_transform_find_color_card_bad_thresh_input(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG)) with pytest.raises(RuntimeError): pcv.params.debug = None _, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, threshold='gaussian') " 50038,"def _run(handle_data, initialize, before_trading_start, analyze, algofile, algotext, defines, data_frequency, capital_base, bundle, bundle_timestamp, start, end, output, trading_calendar, print_algo, metrics_set, local_namespace, environ, blotter, benchmark_spec): """"""Run a backtest for the given algorithm. This is shared between the cli and :func:`zipline.run_algo`. """""" bundle_data = bundles.load( bundle, environ, bundle_timestamp, ) if trading_calendar is None: trading_calendar = get_calendar('XNYS') # date parameter validation if trading_calendar.session_distance(start, end) < 1: raise _RunAlgoError( 'There are no trading days between %s and %s' % ( start.date(), end.date(), ), ) benchmark_sid, benchmark_returns = benchmark_spec.resolve( asset_finder=bundle_data.asset_finder, start_date=start, end_date=end, ) if algotext is not None: if local_namespace: ip = get_ipython() # noqa namespace = ip.user_ns else: namespace = {} for assign in defines: try: name, value = assign.split('=', 2) except ValueError: raise ValueError( 'invalid define %r, should be of the form name=value' % assign, ) try: # evaluate in the same namespace so names may refer to # eachother namespace[name] = eval(value, namespace) except Exception as e: raise ValueError( 'failed to execute definition for name %r: %s' % (name, e), ) elif defines: raise _RunAlgoError( 'cannot pass define without `algotext`', ""cannot pass '-D' / '--define' without '-t' / '--algotext'"", ) else: namespace = {} if algofile is not None: algotext = algofile.read() if print_algo: if PYGMENTS: highlight( algotext, PythonLexer(), TerminalFormatter(), outfile=sys.stdout, ) else: click.echo(algotext) first_trading_day = \ bundle_data.equity_minute_bar_reader.first_trading_day data = DataPortal( bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=first_trading_day, equity_minute_reader=bundle_data.equity_minute_bar_reader, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader, ) pipeline_loader = USEquityPricingLoader.without_fx( bundle_data.equity_daily_bar_reader, bundle_data.adjustment_reader, ) def choose_loader(column): if column in USEquityPricing.columns: return pipeline_loader raise ValueError( ""No PipelineLoader registered for column %s."" % column ) if isinstance(metrics_set, six.string_types): try: metrics_set = metrics.load(metrics_set) except ValueError as e: raise _RunAlgoError(str(e)) if 
isinstance(blotter, six.string_types): try: blotter = load(Blotter, blotter) except ValueError as e: raise _RunAlgoError(str(e)) try: perf = TradingAlgorithm( namespace=namespace, data_portal=data, get_pipeline_loader=choose_loader, trading_calendar=trading_calendar, sim_params=SimulationParameters( start_session=start, end_session=end, trading_calendar=trading_calendar, capital_base=capital_base, data_frequency=data_frequency, ), metrics_set=metrics_set, blotter=blotter, benchmark_returns=benchmark_returns, benchmark_sid=benchmark_sid, **{ 'initialize': initialize, 'handle_data': handle_data, 'before_trading_start': before_trading_start, 'analyze': analyze, } if algotext is None else { 'algo_filename': getattr(algofile, 'name', ''), 'script': algotext, } ).run() except NoBenchmark: raise _RunAlgoError( ( 'no ``benchmark_spec`` was provided and' ' ``zipline.api.set_benchmark`` was not called in' ' ``initialize``' ), ( ""neither '--benchmark-symbol' nor '--benchmark-sid' was"" "" provided and ``zipline.api.set_benchmark`` was not called"" "" in ``initialize``, did you mean to pass '--no-benchmark'"" ), ) if output == '-': click.echo(str(perf)) elif output != os.devnull: # make the zipline magic not write any data perf.to_pickle(output) return perf ","def _run(handle_data, initialize, before_trading_start, analyze, algofile, algotext, defines, data_frequency, capital_base, bundle, bundle_timestamp, start, end, output, trading_calendar, print_algo, metrics_set, local_namespace, environ, blotter, benchmark_spec): """"""Run a backtest for the given algorithm. This is shared between the cli and :func:`zipline.run_algo`. """""" bundle_data = bundles.load( bundle, environ, bundle_timestamp, ) if trading_calendar is None: trading_calendar = get_calendar('XNYS') # date parameter validation if trading_calendar.session_distance(start, end) < 1: raise _RunAlgoError( 'There are no trading days between %s and %s' % ( start.date(), end.date(), ), ) benchmark_sid, benchmark_returns = benchmark_spec.resolve( asset_finder=bundle_data.asset_finder, start_date=start, end_date=end, ) if algotext is not None: if local_namespace: ip = get_ipython() # noqa namespace = ip.user_ns else: namespace = {} for assign in defines: try: name, value = assign.split('=', 2) except ValueError: raise ValueError( 'invalid define %r, should be of the form name=value' % assign, ) try: # evaluate in the same namespace so names may refer to # eachother namespace[name] = eval(value, namespace) except Exception as e: raise ValueError( 'failed to execute definition for name %r: %s' % (name, e), ) elif defines: raise _RunAlgoError( 'cannot pass define without `algotext`', ""cannot pass '-D' / '--define' without '-t' / '--algotext'"", ) else: namespace = {} if algofile is not None: algotext = algofile.read() if print_algo: if PYGMENTS: highlight( algotext, PythonLexer(), TerminalFormatter(), outfile=sys.stdout, ) else: click.echo(algotext) first_trading_day = \ bundle_data.equity_minute_bar_reader.first_trading_day data = DataPortal( bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=first_trading_day, equity_minute_reader=bundle_data.equity_minute_bar_reader, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader, ) pipeline_loader = USEquityPricingLoader.without_fx( bundle_data.equity_daily_bar_reader, bundle_data.adjustment_reader, ) def choose_loader(column): if column in USEquityPricing.columns: return pipeline_loader raise ValueError( ""No PipelineLoader 
registered for column %s."" % column ) if isinstance(metrics_set, six.string_types): try: metrics_set = metrics.load(metrics_set) except ValueError as e: raise _RunAlgoError(str(e)) if isinstance(blotter, six.string_types): try: blotter = load(Blotter, blotter) except ValueError as e: raise _RunAlgoError(str(e)) try: perf = TradingAlgorithm( namespace=namespace, data_portal=data, get_pipeline_loader=choose_loader, trading_calendar=trading_calendar, sim_params=SimulationParameters( start_session=start, end_session=end, trading_calendar=trading_calendar, capital_base=capital_base, data_frequency=data_frequency, ), metrics_set=metrics_set, blotter=blotter, benchmark_returns=benchmark_returns, benchmark_sid=benchmark_sid, **{ 'initialize': initialize, 'handle_data': handle_data, 'before_trading_start': before_trading_start, 'analyze': analyze, } if algotext is None else { 'algo_filename': getattr(algofile, 'name', ''), 'script': algotext, } ).run() except NoBenchmark: raise _RunAlgoError( ( 'no ``benchmark_spec`` was provided and' ' ``zipline.api.set_benchmark`` was not called in' ' ``initialize``' ), ( ""Neither '--benchmark-symbol' nor '--benchmark-sid' was"" "" provided, and ``zipline.api.set_benchmark`` was not called"" "" in ``initialize``. Did you mean to pass '--no-benchmark'?"" ), ) if output == '-': click.echo(str(perf)) elif output != os.devnull: # make the zipline magic not write any data perf.to_pickle(output) return perf " 35470,"def handle_fan_tici(controller, max_cpu_temp, fan_speed, ignition): global last_ignition controller.neg_limit = -(80 if ignition else 30) controller.pos_limit = -(30 if ignition else 0) if ignition != last_ignition: controller.reset() fan_pwr_out = -int(controller.update( setpoint=(75), measurement=max_cpu_temp, feedforward=interp(max_cpu_temp, [60.0, 100.0], [0, -80]) )) last_ignition = ignition return fan_pwr_out ","def handle_fan_tici(controller, max_cpu_temp, fan_speed, ignition): global last_ignition controller.neg_limit = -(80 if ignition else 30) controller.pos_limit = -(30 if ignition else 0) if ignition != last_ignition: controller.reset() fan_pwr_out = -int(controller.update( setpoint=75, measurement=max_cpu_temp, feedforward=interp(max_cpu_temp, [60.0, 100.0], [0, -80]) )) last_ignition = ignition return fan_pwr_out " 32351,"def rasterize(path: str, width: int, height: int, r_type: RasterizeType = RasterizeType.PNG, wait_time: int = 0, offline_mode: bool = False, max_page_load_time: int = 180, full_screen: bool = False, r_mode: RasterizeMode = RasterizeMode.WEBDRIVER_PREFERED): """""" Capturing a snapshot of a path (url/file), using Chrome Driver :param offline_mode: when set to True, will block any outgoing communication :param path: file path, or website url :param width: desired snapshot width in pixels :param height: desired snapshot height in pixels :param r_type: result type: .png/.pdf :param wait_time: time in seconds to wait before taking a screenshot :param max_page_load_time: amount of time to wait for a page load to complete before throwing an error :param full_screen: when set to True, the snapshot will take the whole page :param r_mode: rasterizing mode see: RasterizeMode enum. """""" demisto.debug(f'Rasterizing using mode: {r_mode}') page_load_time = max_page_load_time if max_page_load_time > 0 else DEFAULT_PAGE_LOAD_TIME rasterize_funcs: Tuple[Callable, ...] 
= () if r_mode == RasterizeMode.WEBDRIVER_PREFERED: rasterize_funcs = (rasterize_webdriver, rasterize_headless_cmd) elif r_mode == RasterizeMode.WEBDRIVER_ONLY: rasterize_funcs = (rasterize_webdriver,) elif r_mode == RasterizeMode.HEADLESS_CLI_PREFERED: rasterize_funcs = (rasterize_headless_cmd, rasterize_webdriver) elif r_mode == RasterizeMode.HEADLESS_CLI_ONLY: rasterize_funcs = (rasterize_headless_cmd,) else: # should never happen as we use an enum demisto.error(f'Unknown rasterize mode: {r_mode}') raise ValueError(f'Unknown rasterize mode: {r_mode}') try: for i, r_func in enumerate(rasterize_funcs): # type: ignore[var-annotated] try: return r_func(path=path, width=width, height=height, r_type=r_type, wait_time=wait_time, # type: ignore[misc] offline_mode=offline_mode, max_page_load_time=page_load_time, full_screen=full_screen) except Exception as ex: if i < (len(rasterize_funcs) - 1): demisto.info(f'Failed rasterize preferred option trying second option. Exception: {ex}') else: demisto.info(f'Failed rasterizing using all avialable options. Raising last exception: {ex}') raise except (InvalidArgumentException, NoSuchElementException) as ex: if 'invalid argument' in str(ex): err_msg = URL_ERROR_MSG + str(ex) return_err_or_warn(err_msg) else: return_err_or_warn(f'Invalid exception: {ex}\nTrace:{traceback.format_exc()}') except (TimeoutException, subprocess.TimeoutExpired) as ex: return_err_or_warn(f'Timeout exception with max load time of: {page_load_time} seconds. {ex}') except Exception as ex: err_str = f'General error: {ex}\nTrace:{traceback.format_exc()}' demisto.error(err_str) return_err_or_warn(err_str) ","def rasterize(path: str, width: int, height: int, r_type: RasterizeType = RasterizeType.PNG, wait_time: int = 0, offline_mode: bool = False, max_page_load_time: int = 180, full_screen: bool = False, r_mode: RasterizeMode = RasterizeMode.WEBDRIVER_PREFERED): """""" Capturing a snapshot of a path (url/file), using Chrome Driver :param offline_mode: when set to True, will block any outgoing communication :param path: file path, or website url :param width: desired snapshot width in pixels :param height: desired snapshot height in pixels :param r_type: result type: .png/.pdf :param wait_time: time in seconds to wait before taking a screenshot :param max_page_load_time: amount of time to wait for a page load to complete before throwing an error :param full_screen: when set to True, the snapshot will take the whole page :param r_mode: rasterizing mode see: RasterizeMode enum. """""" demisto.debug(f'Rasterizing using mode: {r_mode}') page_load_time = max_page_load_time if max_page_load_time > 0 else DEFAULT_PAGE_LOAD_TIME rasterize_funcs: Tuple[Callable, ...] 
= () if r_mode == RasterizeMode.WEBDRIVER_PREFERED: rasterize_funcs = (rasterize_webdriver, rasterize_headless_cmd) elif r_mode == RasterizeMode.WEBDRIVER_ONLY: rasterize_funcs = (rasterize_webdriver,) elif r_mode == RasterizeMode.HEADLESS_CLI_PREFERED: rasterize_funcs = (rasterize_headless_cmd, rasterize_webdriver) elif r_mode == RasterizeMode.HEADLESS_CLI_ONLY: rasterize_funcs = (rasterize_headless_cmd,) else: # should never happen as we use an enum demisto.error(f'Unknown rasterize mode: {r_mode}') raise ValueError(f'Unknown rasterize mode: {r_mode}') try: for i, r_func in enumerate(rasterize_funcs): # type: ignore[var-annotated] try: return r_func(path=path, width=width, height=height, r_type=r_type, wait_time=wait_time, # type: ignore[misc] offline_mode=offline_mode, max_page_load_time=page_load_time, full_screen=full_screen) except Exception as ex: if i < (len(rasterize_funcs) - 1): demisto.info(f'Failed rasterize preferred option trying second option. Exception: {ex}') else: demisto.info(f'Failed rasterizing using all available options. Raising last exception: {ex}') raise except (InvalidArgumentException, NoSuchElementException) as ex: if 'invalid argument' in str(ex): err_msg = URL_ERROR_MSG + str(ex) return_err_or_warn(err_msg) else: return_err_or_warn(f'Invalid exception: {ex}\nTrace:{traceback.format_exc()}') except (TimeoutException, subprocess.TimeoutExpired) as ex: return_err_or_warn(f'Timeout exception with max load time of: {page_load_time} seconds. {ex}') except Exception as ex: err_str = f'General error: {ex}\nTrace:{traceback.format_exc()}' demisto.error(err_str) return_err_or_warn(err_str) " 20201,"def purge(url=None): akamai_config = settings.WAGTAILFRONTENDCACHE.get('akamai', {}) cloudfront_config = settings.WAGTAILFRONTENDCACHE.get( 'cloudfront', {}) if url: # Use the Wagtail frontendcache PurgeBatch to perform the purge batch = PurgeBatch() batch.add_url(url) # If the URL matches any of our cloudfront distributions, invalidate # with that backend if any(k for k in cloudfront_config.get('DISTRIBUTION_ID', {}) if k in url): logger.info('Purging {} from cloudfront'.format(url)) batch.purge(backends='cloudfront') # Otherwise invalidate with our default backend else: logger.info('Purging {} from akamai'.format(url)) batch.purge(backends='akamai') return ""Submitted invalidation for %s"" % url else: # purge_all only exists on our AkamaiBackend backend = AkamaiBackend(akamai_config) logger.info('Purging entire site from akamai') backend.purge_all() return ""Submitted invalidation for the entire site."" ","def purge(url=None): akamai_config = settings.WAGTAILFRONTENDCACHE.get('akamai', {}) cloudfront_config = settings.WAGTAILFRONTENDCACHE.get( 'cloudfront', {}) if url: # Use the Wagtail frontendcache PurgeBatch to perform the purge batch = PurgeBatch() batch.add_url(url) # If the URL matches any of our cloudfront distributions, invalidate # with that backend if any(k for k in cloudfront_config.get('DISTRIBUTION_ID', {}) if k in url): logger.info('Purging {} from CloudFront'.format(url)) batch.purge(backends='cloudfront') # Otherwise invalidate with our default backend else: logger.info('Purging {} from akamai'.format(url)) batch.purge(backends='akamai') return ""Submitted invalidation for %s"" % url else: # purge_all only exists on our AkamaiBackend backend = AkamaiBackend(akamai_config) logger.info('Purging entire site from akamai') backend.purge_all() return ""Submitted invalidation for the entire site."" " 7920,"def test_cell_rotation(pincell_model_w_univ): # Cell 1 is 
filled with a material so we cannot rotate it, but we can get # its rotation matrix (which will be the identity matrix) cell = openmc.lib.cells[1] assert cell.get_rotation() is None with pytest.raises(exc.GeometryError, match='not filled with'): cell.set_rotation(np.array([180., 0., 0.])) # Now repeat with Cell 2 and we will be allowed to do it cell = openmc.lib.cells[2] assert cell.get_rotation() is None cell.set_rotation(np.array([180., 0., 0.])) assert cell.get_rotation() == pytest.approx([180., 0., 0.]) ","def test_cell_rotation(pincell_model_w_univ): # Cell 1 is filled with a material so we cannot rotate it, but we can get # its rotation matrix (which will be the identity matrix) cell = openmc.lib.cells[1] assert cell.get_rotation() is None with pytest.raises(exc.GeometryError, match='not filled with'): cell.set_rotation(np.array([180., 0., 0.])) # Now repeat with Cell 2 and we will be allowed to do it cell = openmc.lib.cells[2] assert cell.get_rotation() is None cell.set_rotation((180., 0., 0.)) assert cell.get_rotation() == pytest.approx([180., 0., 0.]) " 34662,"def add_endpoint_param( parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], help_text: Text, default: Optional[Text] = DEFAULT_ENDPOINTS_PATH, ) -> None: """"""Add an option to an argument parser to configure endpoints path."""""" parser.add_argument(""--endpoints"", type=str, default=default, help=help_text) ","def add_endpoint_param( parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], help_text: Text, default: Optional[Text] = DEFAULT_ENDPOINTS_PATH, ) -> None: """"""Adds an option to an argument parser to configure endpoints path."""""" parser.add_argument(""--endpoints"", type=str, default=default, help=help_text) " 31150,"def main(): SESSION.proxies = handle_proxy() client = SixgillEnrichClient( demisto.params()[""client_id""], demisto.params()[""client_secret""], CHANNEL_CODE, demisto, SESSION, VERIFY ) command = demisto.command() demisto.info(f""Command being called is {command}"") commands: Dict[str, Callable] = { ""test-module"": test_module_command, } try: if demisto.command() == ""ip"": return_results(ip_reputation_command(client, demisto.args())) elif demisto.command() == ""domain"": return_results(domain_reputation_command(client, demisto.args())) elif demisto.command() == ""url"": return_results(url_reputation_command(client, demisto.args())) elif demisto.command() == ""file"": return_results(file_reputation_command(client, demisto.args())) elif demisto.command() == ""actor"": return_results(actor_reputation_command(client, demisto.args())) elif demisto.command() == ""post_id"": return_results(postid_reputation_command(client, demisto.args())) else: readable_output, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Error failed to execute {demisto.command()}, error: [{e}]"") ","def main(): SESSION.proxies = handle_proxy() client = SixgillEnrichClient( demisto.params()[""client_id""], demisto.params()[""client_secret""], CHANNEL_CODE, demisto, SESSION, VERIFY ) command = demisto.command() demisto.info(f""Command being called is {command}"") commands: Dict[str, Callable] = { ""test-module"": test_module_command, } try: if demisto.command() == ""ip"": return_results(ip_reputation_command(client, demisto.args())) elif demisto.command() == ""domain"": return_results(domain_reputation_command(client, demisto.args())) elif demisto.command() 
== ""url"": return_results(url_reputation_command(client, demisto.args())) elif demisto.command() == ""file"": return_results(file_reputation_command(client, demisto.args())) elif command == ""actor"": return_results(actor_reputation_command(client, demisto.args())) elif demisto.command() == ""post_id"": return_results(postid_reputation_command(client, demisto.args())) else: readable_output, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Error failed to execute {demisto.command()}, error: [{e}]"") " 23160,"def _compute_multiplier(limit: int, dtype, largest_block: int, result): """""" Utility function for auto_chunk, to fin how much larger or smaller the ideal chunk size is relative to what we have now. """""" return ( limit / dtype.itemsize / largest_block / math.prod(r for r in result.values() if r != 0) ) ","def _compute_multiplier(limit: int, dtype, largest_block: int, result): """""" Utility function for auto_chunk, to fin how much larger or smaller the ideal chunk size is relative to what we have now. """""" return ( limit / dtype.itemsize / largest_block / math.prod(r for r in result.values() if r) ) " 39886,"def make_cli_character(character_config, click_config, dev: bool = False, teacher_uri: str = None, min_stake: int = 0, **config_args): emitter = click_config.emitter # # Pre-Init # # Handle Keyring if not dev: character_config.attach_keyring() unlock_nucypher_keyring(emitter, character_configuration=character_config, password=get_nucypher_password(confirm=False)) # Handle Teachers teacher_nodes = load_seednodes(emitter, teacher_uris=[teacher_uri] if teacher_uri else None, min_stake=min_stake, federated_only=character_config.federated_only, network_domains=character_config.domains, network_middleware=character_config.network_middleware, registry=character_config.registry) # # Character Init # # Produce Character try: CHARACTER = character_config(known_nodes=teacher_nodes, network_middleware=character_config.network_middleware, **config_args) except CryptoError: raise character_config.keyring.AuthenticationFailed(""Failed keyring unlocking. "" ""Are you sure you provided the correct password?"") # # Post-Init # if CHARACTER.controller is not NO_CONTROL_PROTOCOL: CHARACTER.controller.emitter = emitter # TODO: set it on object creation? Or not set at all? # Federated if character_config.federated_only: emitter.message(""WARNING: Running in Federated mode"", color='yellow') return CHARACTER ","def make_cli_character(character_config, click_config, dev: bool = False, teacher_uri: str = None, min_stake: int = 0, **config_args): emitter = click_config.emitter # # Pre-Init # # Handle Keyring if not dev: character_config.attach_keyring() unlock_nucypher_keyring(emitter, character_configuration=character_config, password=get_nucypher_password(confirm=False)) # Handle Teachers teacher_nodes = load_seednodes(emitter, teacher_uris=[teacher_uri] if teacher_uri else None, min_stake=min_stake, federated_only=character_config.federated_only, network_domains=character_config.domains, network_middleware=character_config.network_middleware, registry=character_config.registry) # # Character Init # # Produce Character try: CHARACTER = character_config(known_nodes=teacher_nodes, network_middleware=character_config.network_middleware, **config_args) except CryptoError: raise character_config.keyring.AuthenticationFailed(""Failed to unlock keyring. 
"" ""Are you sure you provided the correct password?"") # # Post-Init # if CHARACTER.controller is not NO_CONTROL_PROTOCOL: CHARACTER.controller.emitter = emitter # TODO: set it on object creation? Or not set at all? # Federated if character_config.federated_only: emitter.message(""WARNING: Running in Federated mode"", color='yellow') return CHARACTER " 43912,"def second_order_param_shift(tape, dev_wires, argnum=None, shift=np.pi / 2, gradient_recipes=None): r""""""Generate the second-order CV parameter-shift tapes and postprocessing methods required to compute the gradient of a gate parameter with respect to an expectation value. .. note:: The 2nd order method can handle also first-order observables, but 1st order method may be more efficient unless it's really easy to experimentally measure arbitrary 2nd order observables. .. warning:: The 2nd order method can only be executed on devices that support the :class:`~.PolyXP` observable. Args: tape (.QuantumTape): quantum tape to differentiate dev_wires (.Wires): wires on the device the parameter-shift method is computed on argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivative with respect to all trainable indices are returned. shift (float): The shift value to use for the two-term parameter-shift formula. Only valid if the operation in question supports the two-term parameter-shift rule (that is, it has two distinct eigenvalues) and ``gradient_recipes`` is ``None``. gradient_recipes (tuple(list[list[float]] or None)): List of gradient recipes for the parameter-shift method. One gradient recipe must be provided per trainable parameter. Returns: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the evaluated tapes. 
"""""" argnum = argnum or list(tape.trainable_params) gradient_recipes = gradient_recipes or [None] * len(argnum) gradient_tapes = [] shapes = [] obs_indices = [] gradient_values = [] for idx, _ in enumerate(tape.trainable_params): t_idx = list(tape.trainable_params)[idx] op = tape._par_info[t_idx][""op""] if idx not in argnum: # parameter has zero gradient shapes.append(0) obs_indices.append([]) gradient_values.append([]) continue shapes.append(1) # get the gradient recipe for the trainable parameter recipe = gradient_recipes[argnum.index(idx)] recipe = recipe or _get_operation_recipe(tape, idx, shift=shift) recipe = _process_gradient_recipe(recipe) coeffs, multipliers, shifts = recipe if len(shifts) != 2: # The 2nd order CV parameter-shift rule only accepts two-term shifts raise NotImplementedError( ""Taking the analytic gradient for order-2 operators is "" f""unsupported for operation {op} which has a "" ""gradient recipe of more than two terms."" ) shifted_tapes = generate_shifted_tapes(tape, idx, shifts, multipliers) # evaluate transformed observables at the original parameter point # first build the Heisenberg picture transformation matrix Z Z0 = op.heisenberg_tr(dev_wires, inverse=True) Z2 = shifted_tapes[0]._par_info[t_idx][""op""].heisenberg_tr(dev_wires) Z1 = shifted_tapes[1]._par_info[t_idx][""op""].heisenberg_tr(dev_wires) # derivative of the operation Z = Z2 * coeffs[0] + Z1 * coeffs[1] Z = Z @ Z0 # conjugate Z with all the descendant operations B = np.eye(1 + 2 * len(dev_wires)) B_inv = B.copy() succ = tape.graph.descendants_in_order((op,)) operation_descendents = itertools.filterfalse(qml.circuit_graph._is_observable, succ) observable_descendents = filter(qml.circuit_graph._is_observable, succ) for BB in operation_descendents: if not BB.supports_heisenberg: # if the descendant gate is non-Gaussian in parameter-shift differentiation # mode, then there must be no observable following it. continue B = BB.heisenberg_tr(dev_wires) @ B B_inv = B_inv @ BB.heisenberg_tr(dev_wires, inverse=True) Z = B @ Z @ B_inv # conjugation g_tape = tape.copy(copy_operations=True) constants = [] # transform the descendant observables into their derivatives using Z transformed_obs_idx = [] for obs in observable_descendents: # get the index of the descendent observable idx = tape.observables.index(obs) transformed_obs_idx.append(idx) transformed_obs = _transform_observable(obs, Z, dev_wires) A = transformed_obs.parameters[0] constant = None # Check if the transformed observable corresponds to a constant term. if len(A.nonzero()[0]) == 1: if A.ndim == 2 and A[0, 0] != 0: constant = A[0, 0] elif A.ndim == 1 and A[0] != 0: constant = A[0] constants.append(constant) g_tape._measurements[idx] = qml.measure.MeasurementProcess( qml.operation.Expectation, _transform_observable(obs, Z, dev_wires) ) if not any(i is None for i in constants): # Check if *all* transformed observables corresponds to a constant term. # term. If this is the case for all transformed observables on the tape, # then = A = A, # and we can avoid the device execution. 
shapes[-1] = 0 obs_indices.append(transformed_obs_idx) gradient_values.append(constants) continue gradient_tapes.append(g_tape) obs_indices.append(transformed_obs_idx) gradient_values.append(None) def processing_fn(results): grads = [] start = 0 if not results: results = [np.zeros([tape.output_dim])] interface = qml.math.get_interface(results[0]) iterator = enumerate(zip(shapes, gradient_values, obs_indices)) for i, (shape, grad_value, obs_ind) in iterator: if shape == 0: # parameter has zero gradient g = qml.math.zeros_like(results[0], like=interface) if grad_value: g = qml.math.scatter_element_add(g, obs_ind, grad_value, like=interface) grads.append(g) continue obs_result = results[start : start + shape] start = start + shape # compute the linear combination of results and coefficients obs_result = qml.math.stack(obs_result[0]) g = qml.math.zeros_like(obs_result, like=interface) if qml.math.get_interface(g) not in (""tensorflow"", ""autograd""): obs_ind = (obs_ind,) g = qml.math.scatter_element_add(g, obs_ind, obs_result[obs_ind], like=interface) grads.append(g) # The following is for backwards compatibility; currently, # the device stacks multiple measurement arrays, even if not the same # size, resulting in a ragged array. # In the future, we might want to change this so that only tuples # of arrays are returned. for i, g in enumerate(grads): g = qml.math.convert_like(g, results[0]) if hasattr(g, ""dtype"") and g.dtype is np.dtype(""object""): grads[i] = qml.math.hstack(g) return qml.math.T(qml.math.stack(grads)) return gradient_tapes, processing_fn ","def second_order_param_shift(tape, dev_wires, argnum=None, shift=np.pi / 2, gradient_recipes=None): r""""""Generate the second-order CV parameter-shift tapes and postprocessing methods required to compute the gradient of a gate parameter with respect to an expectation value. .. note:: The 2nd order method can handle also first-order observables, but 1st order method may be more efficient unless it's really easy to experimentally measure arbitrary 2nd order observables. .. warning:: The 2nd order method can only be executed on devices that support the :class:`~.PolyXP` observable. Args: tape (.QuantumTape): quantum tape to differentiate dev_wires (.Wires): wires on the device the parameter-shift method is computed on argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivative with respect to all trainable indices are returned. shift (float): The shift value to use for the two-term parameter-shift formula. Only valid if the operation in question supports the two-term parameter-shift rule (that is, it has two distinct eigenvalues) and ``gradient_recipes`` is ``None``. gradient_recipes (tuple(list[list[float]] or None)): List of gradient recipes for the parameter-shift method. One gradient recipe must be provided per trainable parameter. Returns: This gradient transform can be applied directly to :class:`~.QNode` objects: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the evaluated tapes. 
"""""" argnum = argnum or list(tape.trainable_params) gradient_recipes = gradient_recipes or [None] * len(argnum) gradient_tapes = [] shapes = [] obs_indices = [] gradient_values = [] for idx, _ in enumerate(tape.trainable_params): t_idx = list(tape.trainable_params)[idx] op = tape._par_info[t_idx][""op""] if idx not in argnum: # parameter has zero gradient shapes.append(0) obs_indices.append([]) gradient_values.append([]) continue shapes.append(1) # get the gradient recipe for the trainable parameter recipe = gradient_recipes[argnum.index(idx)] recipe = recipe or _get_operation_recipe(tape, idx, shift=shift) recipe = _process_gradient_recipe(recipe) coeffs, multipliers, shifts = recipe if len(shifts) != 2: # The 2nd order CV parameter-shift rule only accepts two-term shifts raise NotImplementedError( ""Taking the analytic gradient for order-2 operators is "" f""unsupported for operation {op} which has a "" ""gradient recipe of more than two terms."" ) shifted_tapes = generate_shifted_tapes(tape, idx, shifts, multipliers) # evaluate transformed observables at the original parameter point # first build the Heisenberg picture transformation matrix Z Z0 = op.heisenberg_tr(dev_wires, inverse=True) Z2 = shifted_tapes[0]._par_info[t_idx][""op""].heisenberg_tr(dev_wires) Z1 = shifted_tapes[1]._par_info[t_idx][""op""].heisenberg_tr(dev_wires) # derivative of the operation Z = Z2 * coeffs[0] + Z1 * coeffs[1] Z = Z @ Z0 # conjugate Z with all the descendant operations B = np.eye(1 + 2 * len(dev_wires)) B_inv = B.copy() succ = tape.graph.descendants_in_order((op,)) operation_descendents = itertools.filterfalse(qml.circuit_graph._is_observable, succ) observable_descendents = filter(qml.circuit_graph._is_observable, succ) for BB in operation_descendents: if not BB.supports_heisenberg: # if the descendant gate is non-Gaussian in parameter-shift differentiation # mode, then there must be no observable following it. continue B = BB.heisenberg_tr(dev_wires) @ B B_inv = B_inv @ BB.heisenberg_tr(dev_wires, inverse=True) Z = B @ Z @ B_inv # conjugation g_tape = tape.copy(copy_operations=True) constants = [] # transform the descendant observables into their derivatives using Z transformed_obs_idx = [] for obs in observable_descendents: # get the index of the descendent observable idx = tape.observables.index(obs) transformed_obs_idx.append(idx) transformed_obs = _transform_observable(obs, Z, dev_wires) A = transformed_obs.parameters[0] constant = None # Check if the transformed observable corresponds to a constant term. if len(A.nonzero()[0]) == 1: if A.ndim == 2 and A[0, 0] != 0: constant = A[0, 0] elif A.ndim == 1 and A[0] != 0: constant = A[0] constants.append(constant) g_tape._measurements[idx] = qml.measure.MeasurementProcess( qml.operation.Expectation, _transform_observable(obs, Z, dev_wires) ) if not any(i is None for i in constants): # Check if *all* transformed observables corresponds to a constant term. # term. If this is the case for all transformed observables on the tape, # then = A = A, # and we can avoid the device execution. 
shapes[-1] = 0 obs_indices.append(transformed_obs_idx) gradient_values.append(constants) continue gradient_tapes.append(g_tape) obs_indices.append(transformed_obs_idx) gradient_values.append(None) def processing_fn(results): grads = [] start = 0 if not results: results = [np.zeros([tape.output_dim])] interface = qml.math.get_interface(results[0]) iterator = enumerate(zip(shapes, gradient_values, obs_indices)) for i, (shape, grad_value, obs_ind) in iterator: if shape == 0: # parameter has zero gradient g = qml.math.zeros_like(results[0], like=interface) if grad_value: g = qml.math.scatter_element_add(g, obs_ind, grad_value, like=interface) grads.append(g) continue obs_result = results[start : start + shape] start = start + shape # compute the linear combination of results and coefficients obs_result = qml.math.stack(obs_result[0]) g = qml.math.zeros_like(obs_result, like=interface) if qml.math.get_interface(g) not in (""tensorflow"", ""autograd""): obs_ind = (obs_ind,) g = qml.math.scatter_element_add(g, obs_ind, obs_result[obs_ind], like=interface) grads.append(g) # The following is for backwards compatibility; currently, # the device stacks multiple measurement arrays, even if not the same # size, resulting in a ragged array. # In the future, we might want to change this so that only tuples # of arrays are returned. for i, g in enumerate(grads): g = qml.math.convert_like(g, results[0]) if hasattr(g, ""dtype"") and g.dtype is np.dtype(""object""): grads[i] = qml.math.hstack(g) return qml.math.T(qml.math.stack(grads)) return gradient_tapes, processing_fn " 58397,"def _get_next_clip(start_index, track): """"""Get the next clip with a non-zero duration"""""" # Iterate over the following clips and return the first ""real"" one for index in range(start_index + 1, len(track)): clip = track[index] if clip.duration().value > 0: return clip return None ","def _get_next_clip(start_index, track): """"""Get the next clip with a non-zero duration"""""" # Iterate over the following clips and return the first ""real"" one for clip in track[start_index+1:]: if clip.duration().value > 0: return clip return None " 24858,"def my_func(self): """"""This is a docstring. 
Returns ------- :obj:`list` of :obj:`str` List of strings """""" return [""hi"", ""bye""] ","def my_func(self): """"""find_numpy_returns_with_of Returns ------- :obj:`list` of :obj:`str` List of strings """""" return [""hi"", ""bye""] " 36186,"def generate(seed): home_page = get_homepage() reseed(seed) try: bannered_campaign_namespace = WagtailPage.objects.get(title='bannered_campaigns') print('bannered campaigns namespace exists') except WagtailPage.DoesNotExist: print('Generating a bannered campaigns namespace') bannered_campaign_namespace = MiniSiteNamespaceFactory.create( parent=home_page, title='bannered_campaigns', live=False ) reseed(seed) print('Generating Bannered Campaign Pages under namespace') campaigns = [CampaignPageFactory.create(parent=bannered_campaign_namespace) for i in range(5)] reseed(seed) print('Generating Donation Modals for Campaign Pages') [DonationModalsFactory.create(page=campaign) for campaign in campaigns] reseed(seed) ","def generate(seed): home_page = get_homepage() reseed(seed) try: bannered_campaign_namespace = WagtailPage.objects.get(title='campaigns') print('bannered campaigns namespace exists') except WagtailPage.DoesNotExist: print('Generating a bannered campaigns namespace') bannered_campaign_namespace = MiniSiteNamespaceFactory.create( parent=home_page, title='bannered_campaigns', live=False ) reseed(seed) print('Generating Bannered Campaign Pages under namespace') campaigns = [CampaignPageFactory.create(parent=bannered_campaign_namespace) for i in range(5)] reseed(seed) print('Generating Donation Modals for Campaign Pages') [DonationModalsFactory.create(page=campaign) for campaign in campaigns] reseed(seed) " 38279,"def interrupt_handler(sig, frame): try: __spark_context.cancelAllJobs() except Exception as e: print(f""Error occurred while calling handler{e}"") ","def interrupt_handler(sig, frame): try: __spark_context.cancelAllJobs() except Exception as e: print(f""Error occurred while calling handler: {e}"") " 44197,"def factorize(two, tol): r""""""Return double-factorized form of a two-electron tensor. The second quantized electronic Hamiltonian is constructed in terms of fermionic creation, :math:`a^{\dagger}` , and annihilation, :math:`a`, operators as [`arXiv:1902.02134 `_] .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha}, where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as .. math:: h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right) \phi_q(r) dr, and .. math:: h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|} dr_1 dr_2. Rearranging the integrals in the chemist notation, [11|22], gives .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}. with .. math:: T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}. and :math:`V` is the two-electron tensor in chemist notation. The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that .. math:: V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}. with the rank :math:`r \in \mathcal{O}(n)`. 
The matrices :math:`L` are further diagonalized and truncated in a second level of factorization. The algorithm has the following steps [`arXiv:1902.02134 `_]. 1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \ :math:`n^2 \times n^2` matrix where n is the number of orbitals. 2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \ corresponding eigenvalues larger than a threshold. 3. Reshape the selected eigenvectors to :math:`n \times n` matrices. 4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \ eigenvalues is larger than a threshold. Args: two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis arranged in chemist notation [11|22] tol (float): cutoff value for discarding the negligible factors Returns: tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron tensor, eigenvalues of the generated factors, eigenvectors of the generated factors **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772 >>> mol = qml.qchem.Molecule(symbols, geometry) >>> core, one, two = qml.qchem.electron_integrals(mol)() >>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation >>> l, w, v = factorize(two, 1e-5) >>> print(l) [[[ 1.06723440e-01 9.73575768e-15] [ 8.36288956e-15 -1.04898533e-01]] [[-2.20945401e-13 -4.25688222e-01] [-4.25688222e-01 -2.98228790e-13]] [[-8.14472856e-01 5.01669019e-13] [ 5.01689072e-13 -8.28642140e-01]]] """""" n = two.shape[0] two = two.reshape(n * n, n * n) eigvals, eigvecs = np.linalg.eigh(two) eigvals = np.array([val for val in eigvals if abs(val) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals))) factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))]) eigvals, eigvecs = np.linalg.eigh(factors) eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] return factors, eigvals, eigvecs ","def factorize(two, tol): r""""""Return double-factorized form of a two-electron tensor. The second quantized electronic Hamiltonian is constructed in terms of fermionic creation, :math:`a^{\dagger}` , and annihilation, :math:`a`, operators as [`arXiv:1902.02134 `_] .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha}, where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as .. math:: h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right) \phi_q(r) dr, and .. math:: h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|} dr_1 dr_2. Rearranging the integrals in the chemist notation, [11|22], gives .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}. with .. math:: T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}. and :math:`V` is the two-electron tensor in chemist notation. The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that .. 
math:: V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}. with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized and truncated in a second level of factorization. The algorithm has the following steps [`arXiv:1902.02134 `_]. 1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \ :math:`n^2 \times n^2` matrix where n is the number of orbitals. 2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \ corresponding eigenvalues larger than a threshold. 3. Reshape the selected eigenvectors to :math:`n \times n` matrices. 4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \ eigenvalues is larger than a threshold. Args: two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis arranged in chemist notation [11|22] tol (float): cutoff value for discarding the negligible factors Returns: tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron tensor, eigenvalues of the generated factors, eigenvectors of the generated factors **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772 >>> mol = qml.qchem.Molecule(symbols, geometry) >>> core, one, two = qml.qchem.electron_integrals(mol)() >>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation >>> l, w, v = factorize(two, 1e-5) >>> print(l) [[[ 1.06723440e-01 9.73575768e-15] [ 8.36288956e-15 -1.04898533e-01]] [[-2.20945401e-13 -4.25688222e-01] [-4.25688222e-01 -2.98228790e-13]] [[-8.14472856e-01 5.01669019e-13] [ 5.01689072e-13 -8.28642140e-01]]] """""" n = two.shape[0] two = two.reshape(n * n, n * n) eigvals, eigvecs = np.linalg.eigh(two) eigvals = np.array([val for val in eigvals if abs(val) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals))) factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))]) eigvals, eigvecs = np.linalg.eigh(factors) eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] return factors, eigvals, eigvecs " 32565,"def get_entry_by_file_name(file_name): entries = demisto.executeCommand('getEntries', {}) for entry in reversed(entries): fn = demisto.get(entry, 'File') if type(fn) not in [str, str]: continue if file_name.lower() == fn.lower(): return entry raise ValueError('Was unable to find ""{}"" in the war room. Please ensure the file was uploaded.'.format(file_name)) ","def get_entry_by_file_name(file_name): entries = demisto.executeCommand('getEntries', {}) for entry in reversed(entries): fn = demisto.get(entry, 'File') if not isinstance(fn, str): continue if file_name.lower() == fn.lower(): return entry raise ValueError('Was unable to find ""{}"" in the war room. 
Please ensure the file was uploaded.'.format(file_name)) " 31018,"def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" params = demisto.params() api_key = params.get('api_key') verify = not params.get('insecure', False) proxy = params().get('proxy') is True try: command = demisto.command() LOG(f'Command being called is {demisto.command()}') client = Client(api_key=api_key, verify=verify, proxy=proxy) commands = { 'threatvault-antivirus-signature-get': antivirus_signature_get, 'file': file_reputation, 'threatvault-dns-signature-get-by-id': dns_get_by_id, 'threatvault-antispyware-signature-get-by-id': antispyware_get_by_id, 'threatvault-ip-geo-get': ip_geo_get, 'threatvault-antivirus-signature-search': antivirus_signature_search, 'threatvault-dns-signature-search': dns_signature_search, 'threatvault-antispyware-signature-search': antispyware_signature_search, 'threatvault-signature-search-results': signature_search_results, } if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. return_results(test_module(client)) elif command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" was not implemented.') except Exception as err: return_error(str(err), err) ","def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" params = demisto.params() api_key = params.get('api_key') verify = not params.get('insecure', False) proxy = params.get('proxy') try: command = demisto.command() LOG(f'Command being called is {demisto.command()}') client = Client(api_key=api_key, verify=verify, proxy=proxy) commands = { 'threatvault-antivirus-signature-get': antivirus_signature_get, 'file': file_reputation, 'threatvault-dns-signature-get-by-id': dns_get_by_id, 'threatvault-antispyware-signature-get-by-id': antispyware_get_by_id, 'threatvault-ip-geo-get': ip_geo_get, 'threatvault-antivirus-signature-search': antivirus_signature_search, 'threatvault-dns-signature-search': dns_signature_search, 'threatvault-antispyware-signature-search': antispyware_signature_search, 'threatvault-signature-search-results': signature_search_results, } if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. return_results(test_module(client)) elif command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" was not implemented.') except Exception as err: return_error(str(err), err) " 55502,"def unwrap_partitions(api_layer_object, axis=None, bind_ip=False): """""" Unwrap partitions of the `api_layer_object`. Parameters ---------- api_layer_object : DataFrame or Series The API layer object. axis : None, 0 or 1. Default is None The axis to unwrap partitions for (0 - row partitions, 1 - column partitions). If axis is None, all the partitions of the API layer object are unwrapped. bind_ip : boolean. Default is False Whether to bind node ip address to each partition or not. Returns ------- list A list of Ray.ObjectRef/Dask.Future to partitions of the `api_layer_object` if Ray/Dask is used as an engine. Notes ----- In case bind_ip=True, a list containing tuples of Ray.ObjectRef/Dask.Future to node ip addresses and partitions of the `api_layer_object`, respectively, is returned if Ray/Dask is used as an engine. 
"""""" if not hasattr(api_layer_object, ""_query_compiler""): raise ValueError( f""Only API Layer objects may be passed in here, got {type(api_layer_object)} instead."" ) if bind_ip and EnablePartitionIPs.get() is False: ValueError( ""Passed `bind_ip=True` but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) if axis is None: def _unwrap_partitions(oid): if bind_ip: return [ [(partition.ip, getattr(partition, oid)) for partition in row] for row in api_layer_object._query_compiler._modin_frame._partitions ] else: return [ [getattr(partition, oid) for partition in row] for row in api_layer_object._query_compiler._modin_frame._partitions ] actual_engine = type( api_layer_object._query_compiler._modin_frame._partitions[0][0] ).__name__ if actual_engine in (""PandasOnRayFramePartition"",): return _unwrap_partitions(""oid"") elif actual_engine in (""PandasOnDaskFramePartition"",): return _unwrap_partitions(""future"") raise ValueError( f""Do not know how to unwrap '{actual_engine}' underlying partitions"" ) else: partitions = ( api_layer_object._query_compiler._modin_frame._frame_mgr_cls.axis_partition( api_layer_object._query_compiler._modin_frame._partitions, axis ^ 1 ) ) return [ part.coalesce(bind_ip=bind_ip).unwrap(squeeze=True, bind_ip=bind_ip) for part in partitions ] ","def unwrap_partitions(api_layer_object, axis=None, bind_ip=False): """""" Unwrap partitions of the `api_layer_object`. Parameters ---------- api_layer_object : DataFrame or Series The API layer object. axis : None, 0 or 1. Default is None The axis to unwrap partitions for (0 - row partitions, 1 - column partitions). If axis is None, all the partitions of the API layer object are unwrapped. bind_ip : boolean. Default is False Whether to bind node ip address to each partition or not. Returns ------- list A list of Ray.ObjectRef/Dask.Future to partitions of the `api_layer_object` if Ray/Dask is used as an engine. Notes ----- In case bind_ip=True, a list containing tuples of Ray.ObjectRef/Dask.Future to node ip addresses and partitions of the `api_layer_object`, respectively, is returned if Ray/Dask is used as an engine. 
"""""" if not hasattr(api_layer_object, ""_query_compiler""): raise ValueError( f""Only API Layer objects may be passed in here, got {type(api_layer_object)} instead."" ) if bind_ip and not EnablePartitionIPs.get(): ValueError( ""Passed `bind_ip=True` but partitions IP API was not enabled."" ) if axis is None: def _unwrap_partitions(oid): if bind_ip: return [ [(partition.ip, getattr(partition, oid)) for partition in row] for row in api_layer_object._query_compiler._modin_frame._partitions ] else: return [ [getattr(partition, oid) for partition in row] for row in api_layer_object._query_compiler._modin_frame._partitions ] actual_engine = type( api_layer_object._query_compiler._modin_frame._partitions[0][0] ).__name__ if actual_engine in (""PandasOnRayFramePartition"",): return _unwrap_partitions(""oid"") elif actual_engine in (""PandasOnDaskFramePartition"",): return _unwrap_partitions(""future"") raise ValueError( f""Do not know how to unwrap '{actual_engine}' underlying partitions"" ) else: partitions = ( api_layer_object._query_compiler._modin_frame._frame_mgr_cls.axis_partition( api_layer_object._query_compiler._modin_frame._partitions, axis ^ 1 ) ) return [ part.coalesce(bind_ip=bind_ip).unwrap(squeeze=True, bind_ip=bind_ip) for part in partitions ] " 37833,"def build_in_directory(args: CommandLineArguments) -> None: platform: PlatformName if args.platform != ""auto"": platform = args.platform else: ci_provider = detect_ci_provider() if ci_provider is None: print( textwrap.dedent( """""" cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server; Travis CI, AppVeyor, Azure Pipelines, GitHub Actions, CircleCI, Gitlab, and Cirrus CI are supported. You can run on your development machine or other CI providers using the --platform argument. Check --help output for more information. """""" ), file=sys.stderr, ) sys.exit(2) if sys.platform.startswith(""linux""): platform = ""linux"" elif sys.platform == ""darwin"": platform = ""macos"" elif sys.platform == ""win32"": platform = ""windows"" else: print( 'cibuildwheel: Unable to detect platform from ""sys.platform"" in a CI environment. You can run ' ""cibuildwheel using the --platform argument. 
Check --help output for more information."", file=sys.stderr, ) sys.exit(2) if platform not in PLATFORMS: print(f""cibuildwheel: Unsupported platform: {platform}"", file=sys.stderr) sys.exit(2) options = compute_options(platform=platform, command_line_arguments=args) package_dir = options.globals.package_dir package_files = {""setup.py"", ""setup.cfg"", ""pyproject.toml""} if not any(package_dir.joinpath(name).exists() for name in package_files): names = "", "".join(sorted(package_files, reverse=True)) msg = f""cibuildwheel: Could not find any of {{{names}}} at root of package"" print(msg, file=sys.stderr) sys.exit(2) identifiers = get_build_identifiers( platform=platform, build_selector=options.globals.build_selector, architectures=options.globals.architectures, ) if args.print_build_identifiers is not None: for identifier in identifiers: py_ver, os_plat = identifier.split(""-"") impl = py_ver[:2] version = f""{py_ver[2]}.{py_ver[3:]}"" os_, arch = os_plat.split(""_"", maxsplit=1) print( args.print_build_identifiers.format( identifier=identifier, arch=arch, version=version, os=os_, impl=""CPython"" if impl == ""cp"" else ""PyPy"", ) ) sys.exit(0) # Add CIBUILDWHEEL environment variable os.environ[""CIBUILDWHEEL""] = ""1"" # Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print' sys.stdout = Unbuffered(sys.stdout) # type: ignore[assignment] # create the cache dir before it gets printed & builds performed CIBW_CACHE_PATH.mkdir(parents=True, exist_ok=True) print_preamble(platform=platform, options=options, identifiers=identifiers) try: options.check_for_invalid_configuration(identifiers) allowed_architectures_check(platform, options.globals.architectures) except ValueError as err: print(""cibuildwheel:"", *err.args, file=sys.stderr) sys.exit(4) if not identifiers: print( f""cibuildwheel: No build identifiers selected: {options.globals.build_selector}"", file=sys.stderr, ) if not args.allow_empty: sys.exit(3) output_dir = options.globals.output_dir if not output_dir.exists(): output_dir.mkdir(parents=True) tmp_path = Path(mkdtemp(prefix=""cibw-run-"")).resolve(strict=True) try: with cibuildwheel.util.print_new_wheels( ""\n{n} wheels produced in {m:.0f} minutes:"", output_dir ): if platform == ""linux"": cibuildwheel.linux.build(options, tmp_path) elif platform == ""windows"": cibuildwheel.windows.build(options, tmp_path) elif platform == ""macos"": cibuildwheel.macos.build(options, tmp_path) else: assert_never(platform) finally: # avoid https://github.com/python/cpython/issues/86962 by performing # cleanup manually shutil.rmtree(tmp_path, ignore_errors=sys.platform.startswith(""win"")) if tmp_path.exists(): log.warning(f""Can't delete temporary folder '{str(tmp_path)}'"") ","def build_in_directory(args: CommandLineArguments) -> None: platform: PlatformName if args.platform != ""auto"": platform = args.platform else: ci_provider = detect_ci_provider() if ci_provider is None: print( textwrap.dedent( """""" cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server; Travis CI, AppVeyor, Azure Pipelines, GitHub Actions, CircleCI, Gitlab, and Cirrus CI are supported. You can run on your development machine or other CI providers using the --platform argument. Check --help output for more information. 
"""""" ), file=sys.stderr, ) sys.exit(2) if sys.platform.startswith(""linux""): platform = ""linux"" elif sys.platform == ""darwin"": platform = ""macos"" elif sys.platform == ""win32"": platform = ""windows"" else: print( 'cibuildwheel: Unable to detect platform from ""sys.platform"" in a CI environment. You can run ' ""cibuildwheel using the --platform argument. Check --help output for more information."", file=sys.stderr, ) sys.exit(2) if platform not in PLATFORMS: print(f""cibuildwheel: Unsupported platform: {platform}"", file=sys.stderr) sys.exit(2) options = compute_options(platform=platform, command_line_arguments=args) package_dir = options.globals.package_dir package_files = {""setup.py"", ""setup.cfg"", ""pyproject.toml""} if not any(package_dir.joinpath(name).exists() for name in package_files): names = "", "".join(sorted(package_files, reverse=True)) msg = f""cibuildwheel: Could not find any of {{{names}}} at root of package"" print(msg, file=sys.stderr) sys.exit(2) identifiers = get_build_identifiers( platform=platform, build_selector=options.globals.build_selector, architectures=options.globals.architectures, ) if args.print_build_identifiers: for identifier in identifiers: py_ver, os_plat = identifier.split(""-"") impl = py_ver[:2] version = f""{py_ver[2]}.{py_ver[3:]}"" os_, arch = os_plat.split(""_"", maxsplit=1) print( args.print_build_identifiers.format( identifier=identifier, arch=arch, version=version, os=os_, impl=""CPython"" if impl == ""cp"" else ""PyPy"", ) ) sys.exit(0) # Add CIBUILDWHEEL environment variable os.environ[""CIBUILDWHEEL""] = ""1"" # Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print' sys.stdout = Unbuffered(sys.stdout) # type: ignore[assignment] # create the cache dir before it gets printed & builds performed CIBW_CACHE_PATH.mkdir(parents=True, exist_ok=True) print_preamble(platform=platform, options=options, identifiers=identifiers) try: options.check_for_invalid_configuration(identifiers) allowed_architectures_check(platform, options.globals.architectures) except ValueError as err: print(""cibuildwheel:"", *err.args, file=sys.stderr) sys.exit(4) if not identifiers: print( f""cibuildwheel: No build identifiers selected: {options.globals.build_selector}"", file=sys.stderr, ) if not args.allow_empty: sys.exit(3) output_dir = options.globals.output_dir if not output_dir.exists(): output_dir.mkdir(parents=True) tmp_path = Path(mkdtemp(prefix=""cibw-run-"")).resolve(strict=True) try: with cibuildwheel.util.print_new_wheels( ""\n{n} wheels produced in {m:.0f} minutes:"", output_dir ): if platform == ""linux"": cibuildwheel.linux.build(options, tmp_path) elif platform == ""windows"": cibuildwheel.windows.build(options, tmp_path) elif platform == ""macos"": cibuildwheel.macos.build(options, tmp_path) else: assert_never(platform) finally: # avoid https://github.com/python/cpython/issues/86962 by performing # cleanup manually shutil.rmtree(tmp_path, ignore_errors=sys.platform.startswith(""win"")) if tmp_path.exists(): log.warning(f""Can't delete temporary folder '{str(tmp_path)}'"") " 10890,"def process_url(self, url, retrieve=False): """"""Evaluate a URL as a possible download, and maybe retrieve it"""""" if url in self.scanned_urls and not retrieve: return self.scanned_urls[url] = True if not URL_SCHEME(url): self.process_filename(url) return else: dists = list(distros_for_url(url)) if dists: if not self.url_ok(url): return self.debug(""Found link: %s"", url) 
if dists or not retrieve or url in self.fetched_urls: list(map(self.add, dists)) return # don't need the actual page if not self.url_ok(url): self.fetched_urls[url] = True return self.info(""Reading %s"", url) self.fetched_urls[url] = True # prevent multiple fetch attempts tmpl = ""Download error on %s: %%s -- Some packages may not be found!"" f = self.open_url(url, tmpl % url) if f is None: return if isinstance(f, urllib.error.HTTPError) and f.code == 401: self.info(""Authentication error: %s"" % f.msg) self.fetched_urls[f.url] = True if 'html' not in f.headers.get('content-type', '').lower(): f.close() # not html, we can't process it return base = f.url # handle redirects page = f.read() if isinstance(page, six.text_type): page = page.encode('utf8') charset = 'utf8' else: if isinstance(f, urllib.error.HTTPError): # Errors have no charset, assume latin1: charset = 'latin-1' else: try: charset = f.headers.get_param('charset') or 'latin-1' except AttributeError: charset = f.headers.getparam('charset') or 'latin-1' try: html_page = HTMLPage(page, charset, base, cache_link_parsing=False) except TypeError: html_page = HTMLPage(page, charset, base) plinks = list(parse_links(html_page)) pip_links = [l.url for l in plinks] if not isinstance(page, str): # In Python 3 and got bytes but want str. page = page.decode(charset, ""ignore"") f.close() links = [] for match in HREF.finditer(page): link = urllib.parse.urljoin(base, htmldecode(match.group(1))) links.append(link) assert not set(pip_links) ^ set(links) for link in plinks: if _check_link_requires_python(link, PY_VERSION_INFO): self.process_url(link.url) if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: page = self.process_index(url, page) ","def process_url(self, url, retrieve=False): """"""Evaluate a URL as a possible download, and maybe retrieve it"""""" if url in self.scanned_urls and not retrieve: return self.scanned_urls[url] = True if not URL_SCHEME(url): self.process_filename(url) return else: dists = list(distros_for_url(url)) if dists: if not self.url_ok(url): return self.debug(""Found link: %s"", url) if dists or not retrieve or url in self.fetched_urls: list(map(self.add, dists)) return # don't need the actual page if not self.url_ok(url): self.fetched_urls[url] = True return self.info(""Reading %s"", url) self.fetched_urls[url] = True # prevent multiple fetch attempts tmpl = ""Download error on %s: %%s -- Some packages may not be found!"" f = self.open_url(url, tmpl % url) if f is None: return if isinstance(f, urllib.error.HTTPError) and f.code == 401: self.info(""Authentication error: %s"" % f.msg) self.fetched_urls[f.url] = True if 'html' not in f.headers.get('content-type', '').lower(): f.close() # not html, we can't process it return base = f.url # handle redirects page = f.read() if isinstance(page, six.text_type): page = page.encode('utf8') charset = 'utf8' else: if isinstance(f, urllib.error.HTTPError): # Errors have no charset, assume latin1: charset = 'latin-1' else: try: charset = f.headers.get_param('charset') or 'latin-1' except AttributeError: charset = f.headers.getparam('charset') or 'latin-1' try: html_page = HTMLPage(page, charset, base, cache_link_parsing=False) except TypeError: html_page = HTMLPage(page, charset, base) plinks = list(parse_links(html_page)) pip_links = [l.url for l in plinks] if not isinstance(page, str): # In Python 3 and got bytes but want str. 
page = page.decode(charset, ""ignore"") f.close() links = [] for match in HREF.finditer(page): link = urllib.parse.urljoin(base, htmldecode(match.group(1))) links.append(link) assert set(pip_links) == set(links) for link in plinks: if _check_link_requires_python(link, PY_VERSION_INFO): self.process_url(link.url) if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: page = self.process_index(url, page) " 6879,"def get_permission_query_conditions(user): if not user: user = frappe.session.user if user == 'Administrator': return roles = frappe.get_roles(user) if ""System Manager"" in roles: return None allowed_modules = [frappe.db.escape(module.get('module_name')) for module in get_modules_from_all_apps_for_user()] module_condition = '`tabDashboard`.`module` in ({allowed_modules}) or `tabDashboard`.`module` is NULL'.format( allowed_modules=','.join(allowed_modules)) return '{module_condition}'.format(module_condition=module_condition) ","def get_permission_query_conditions(user): if not user: user = frappe.session.user if user == 'Administrator': return roles = frappe.get_roles(user) if ""System Manager"" in roles: return None allowed_modules = [frappe.db.escape(module.get('module_name')) for module in get_modules_from_all_apps_for_user()] module_condition = '`tabDashboard`.`module` in ({allowed_modules}) or `tabDashboard`.`module` is NULL'.format( allowed_modules=','.join(allowed_modules)) return module_condition " 7234,"def _area_overlap(A): """""" Return overlapping area of A with itself. Create overlap arrays for higher dimensions using matrix multiplication. >>> _area_overlap(np.empty(4)) array([0.625, 0.875, 0.875, 0.625]) >>> _area_overlap(np.empty((3, 5))) array([[0.4 , 0.53333333, 0.66666667, 0.53333333, 0.4 ], [0.6 , 0.8 , 1. , 0.8 , 0.6 ], [0.4 , 0.53333333, 0.66666667, 0.53333333, 0.4 ]]) """""" for dim, shape in enumerate(A.shape): if dim == 0: w = _triangle(shape) else: w = w[..., None] @ _triangle(shape)[None, ] return w ","def _area_overlap(A): """""" Return overlapping area of A with itself. Create overlap arrays for higher dimensions using matrix multiplication. >>> _area_overlap(np.empty(4)) array([0.625, 0.875, 0.875, 0.625]) >>> _area_overlap(np.empty((3, 5))) array([[0.4 , 0.53333333, 0.66666667, 0.53333333, 0.4 ], [0.6 , 0.8 , 1. , 0.8 , 0.6 ], [0.4 , 0.53333333, 0.66666667, 0.53333333, 0.4 ]]) """""" for dim, shape in enumerate(A.shape): if dim == 0: w = _triangle(shape) else: w = w[..., np.newaxis] @ _triangle(shape)[np.newaxis, ...] return w " 39630,"def _normalize_view_ptr_expr( ir_source: irast.Set, shape_el: qlast.ShapeElement, view_scls: s_objtypes.ObjectType, *, path_id: irast.PathId, path_id_namespace: Optional[irast.Namespace]=None, exprtype: s_types.ExprType = s_types.ExprType.Select, from_default: bool=False, view_rptr: Optional[context.ViewRPtr]=None, pending_pointers: Collection[s_pointers.Pointer]=(), ctx: context.ContextLevel) -> Tuple[ s_pointers.Pointer, Optional[irast.Set]]: steps = shape_el.expr.steps is_linkprop = False is_polymorphic = False is_mutation = exprtype.is_insert() or exprtype.is_update() materialized = None # Pointers may be qualified by the explicit source # class, which is equivalent to Expr[IS Type]. 
plen = len(steps) ptrsource: s_sources.Source = view_scls qlexpr: Optional[qlast.Expr] = None target_typexpr = None source = [] base_ptrcls_is_alias = False irexpr = None if plen >= 2 and isinstance(steps[-1], qlast.TypeIntersection): # Target type intersection: foo: Type target_typexpr = steps[-1].type plen -= 1 steps = steps[:-1] if plen == 1: # regular shape lexpr = steps[0] assert isinstance(lexpr, qlast.Ptr) is_linkprop = lexpr.type == 'property' if is_linkprop: if view_rptr is None or view_rptr.ptrcls is None: raise errors.QueryError( 'invalid reference to link property ' 'in top level shape', context=lexpr.context) assert isinstance(view_rptr.ptrcls, s_links.Link) ptrsource = view_rptr.ptrcls elif plen == 2 and isinstance(steps[0], qlast.TypeIntersection): # Source type intersection: [IS Type].foo source = [steps[0]] lexpr = steps[1] ptype = steps[0].type if not isinstance(ptype, qlast.TypeName): raise errors.QueryError( 'complex type expressions are not supported here', context=ptype.context, ) source_spec = schemactx.get_schema_type(ptype.maintype, ctx=ctx) if not isinstance(source_spec, s_objtypes.ObjectType): raise errors.QueryError( f'expected object type, got ' f'{source_spec.get_verbosename(ctx.env.schema)}', context=ptype.context, ) ptrsource = source_spec is_polymorphic = True else: # pragma: no cover raise RuntimeError( f'unexpected path length in view shape: {len(steps)}') assert isinstance(lexpr, qlast.Ptr) ptrname = lexpr.ptr.name compexpr: Optional[qlast.Expr] = shape_el.compexpr if compexpr is None and is_mutation: raise errors.QueryError( ""mutation queries must specify values with ':='"", context=steps[-1].context, ) ptrcls: Optional[s_pointers.Pointer] if compexpr is None: ptrcls = setgen.resolve_ptr( ptrsource, ptrname, track_ref=lexpr, ctx=ctx, source_context=shape_el.context) if is_polymorphic: ptrcls = schemactx.derive_ptr( ptrcls, view_scls, ctx=ctx) base_ptrcls = ptrcls.get_bases(ctx.env.schema).first(ctx.env.schema) base_ptr_is_computable = base_ptrcls in ctx.source_map ptr_name = sn.QualName( module='__', name=ptrcls.get_shortname(ctx.env.schema).name, ) # Schema computables that point to opaque unions will just have # BaseObject as their target, but in order to properly compile # it, we need to know the actual type here, so we recompute it. # XXX: This is a hack, though, and hopefully we can fix it once # the computable/alias rework lands. 
is_opaque_schema_computable = ( ptrcls.is_pure_computable(ctx.env.schema) and (t := ptrcls.get_target(ctx.env.schema)) and t.get_name(ctx.env.schema) == sn.QualName('std', 'BaseObject') ) base_required = base_ptrcls.get_required(ctx.env.schema) base_cardinality = _get_base_ptr_cardinality(base_ptrcls, ctx=ctx) base_is_singleton = False if base_cardinality is not None and base_cardinality.is_known(): base_is_singleton = base_cardinality.is_single() is_nontrivial = astutils.is_nontrivial_shape_element(shape_el) is_obj = not_none(ptrcls.get_target(ctx.env.schema)).is_object_type() if ( is_obj or is_nontrivial or shape_el.elements or base_ptr_is_computable or is_polymorphic or target_typexpr is not None or (ctx.implicit_limit and not base_is_singleton) or is_opaque_schema_computable ): if target_typexpr is None: qlexpr = qlast.Path(steps=[*source, lexpr], partial=True) else: qlexpr = qlast.Path(steps=[ *source, lexpr, qlast.TypeIntersection(type=target_typexpr), ], partial=True) if shape_el.elements: qlexpr = qlast.Shape(expr=qlexpr, elements=shape_el.elements) qlexpr = astutils.ensure_qlstmt(qlexpr) assert isinstance(qlexpr, qlast.SelectQuery) qlexpr.where = shape_el.where qlexpr.orderby = shape_el.orderby if shape_el.offset or shape_el.limit: qlexpr = qlast.SelectQuery(result=qlexpr, implicit=True) qlexpr.offset = shape_el.offset qlexpr.limit = shape_el.limit if ( (ctx.expr_exposed or ctx.stmt is ctx.toplevel_stmt) and not qlexpr.limit and ctx.implicit_limit and not base_is_singleton ): qlexpr = qlast.SelectQuery(result=qlexpr, implicit=True) qlexpr.limit = qlast.IntegerConstant( value=str(ctx.implicit_limit), ) if target_typexpr is not None: assert isinstance(target_typexpr, qlast.TypeName) intersector_type = schemactx.get_schema_type( target_typexpr.maintype, ctx=ctx) int_result = schemactx.apply_intersection( ptrcls.get_target(ctx.env.schema), # type: ignore intersector_type, ctx=ctx, ) ptr_target = int_result.stype else: _ptr_target = ptrcls.get_target(ctx.env.schema) assert _ptr_target ptr_target = _ptr_target ptr_required = base_required ptr_cardinality = base_cardinality if shape_el.where: # If the shape has a filter on it, we need to force a reinference # of the cardinality, to produce an error if needed. ptr_cardinality = None if ptr_cardinality is None or not ptr_cardinality.is_known(): # We do not know the parent's pointer cardinality yet. ctx.env.pointer_derivation_map[base_ptrcls].append(ptrcls) ctx.env.pointer_specified_info[ptrcls] = ( shape_el.cardinality, shape_el.required, shape_el.context) # If we generated qlexpr for the element, we process the # subview by just compiling the qlexpr. This is so that we can # figure out if it needs materialization and also so that # `qlexpr is not None` always implies that we did the # compilation. if qlexpr: qlptrcls = ptrcls qlptrsource = ptrsource irexpr, _ = _compile_qlexpr( ir_source, qlexpr, view_scls, ptrcls=qlptrcls, ptrsource=qlptrsource, path_id=path_id, ptr_name=ptr_name, is_linkprop=is_linkprop, exprtype=exprtype, ctx=ctx) materialized = setgen.should_materialize( irexpr, ptrcls=ptrcls, materialize_visible=True, skipped_bindings={path_id}, ctx=ctx) ptr_target = inference.infer_type(irexpr, ctx.env) # compexpr is not None else: base_ptrcls = ptrcls = None if (is_mutation and ptrname not in ctx.special_computables_in_mutation_shape): # If this is a mutation, the pointer must exist. 
ptrcls = setgen.resolve_ptr( ptrsource, ptrname, track_ref=lexpr, ctx=ctx) if ptrcls.is_pure_computable(ctx.env.schema): ptr_vn = ptrcls.get_verbosename(ctx.env.schema, with_parent=True) raise errors.QueryError( f'modification of computed {ptr_vn} is prohibited', context=shape_el.context) base_ptrcls = ptrcls.get_bases( ctx.env.schema).first(ctx.env.schema) ptr_name = sn.QualName( module='__', name=ptrcls.get_shortname(ctx.env.schema).name, ) else: ptr_name = sn.QualName( module='__', name=ptrname, ) try: ptrcls = setgen.resolve_ptr( ptrsource, ptrname, track_ref=False, ctx=ctx, ) base_ptrcls = ptrcls.get_bases( ctx.env.schema).first(ctx.env.schema) except errors.InvalidReferenceError: # This is a NEW computable pointer, it's fine. pass qlexpr = astutils.ensure_qlstmt(compexpr) if ((ctx.expr_exposed or ctx.stmt is ctx.toplevel_stmt) and ctx.implicit_limit and isinstance(qlexpr, qlast.OffsetLimitMixin) and not qlexpr.limit): qlexpr = qlast.SelectQuery(result=qlexpr, implicit=True) qlexpr.limit = qlast.IntegerConstant(value=str(ctx.implicit_limit)) irexpr, sub_view_rptr = _compile_qlexpr( ir_source, qlexpr, view_scls, ptrcls=ptrcls, ptrsource=ptrsource, path_id=path_id, ptr_name=ptr_name, is_linkprop=is_linkprop, exprtype=exprtype, ctx=ctx) materialized = setgen.should_materialize( irexpr, ptrcls=ptrcls, materialize_visible=True, skipped_bindings={path_id}, ctx=ctx) ptr_target = inference.infer_type(irexpr, ctx.env) if ( shape_el.operation.op is qlast.ShapeOp.APPEND or shape_el.operation.op is qlast.ShapeOp.SUBTRACT ): if not exprtype.is_update(): op = ( '+=' if shape_el.operation.op is qlast.ShapeOp.APPEND else '-=' ) raise errors.EdgeQLSyntaxError( f""unexpected '{op}'"", context=shape_el.operation.context, ) irexpr.context = compexpr.context is_inbound_alias = False if base_ptrcls is None: base_ptrcls = sub_view_rptr.base_ptrcls base_ptrcls_is_alias = sub_view_rptr.ptrcls_is_alias is_inbound_alias = ( sub_view_rptr.rptr_dir is s_pointers.PointerDirection.Inbound) if ptrcls is not None: ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'owned', True) ptr_cardinality = None ptr_required = False if ( isinstance(ptr_target, s_types.Collection) and not ctx.env.orig_schema.get_by_id(ptr_target.id, default=None) ): # Record references to implicitly defined collection types, # so that the alias delta machinery can pick them up. ctx.env.created_schema_objects.add(ptr_target) anytype = ptr_target.find_any(ctx.env.schema) if anytype is not None: raise errors.QueryError( 'expression returns value of indeterminate type', context=ctx.env.type_origins.get(anytype), ) # Validate that the insert/update expression is # of the correct class. if is_mutation and ptrcls is not None: base_target = ptrcls.get_target(ctx.env.schema) assert base_target is not None if ptr_target.assignment_castable_to( base_target, schema=ctx.env.schema): # Force assignment casts if the target type is not a # subclass of the base type and the cast is not to an # object type. if not ( base_target.is_object_type() or s_types.is_type_compatible( base_target, ptr_target, schema=ctx.env.schema ) ): qlexpr = astutils.ensure_qlstmt(qlast.TypeCast( type=typegen.type_to_ql_typeref(base_target, ctx=ctx), expr=compexpr, )) ptr_target = base_target # We also need to compile the cast to IR. 
with ctx.new() as subctx: subctx.anchors = subctx.anchors.copy() source_path = subctx.create_anchor(irexpr, 'a') cast_qlexpr = astutils.ensure_qlstmt(qlast.TypeCast( type=typegen.type_to_ql_typeref( base_target, ctx=ctx), expr=source_path, )) old_rptr = irexpr.rptr irexpr.rptr = None irexpr = dispatch.compile(cast_qlexpr, ctx=subctx) irexpr.rptr = old_rptr else: expected = [ repr(str(base_target.get_displayname(ctx.env.schema))) ] ercls: Type[errors.EdgeDBError] if ptrcls.is_property(ctx.env.schema): ercls = errors.InvalidPropertyTargetError else: ercls = errors.InvalidLinkTargetError ptr_vn = ptrcls.get_verbosename(ctx.env.schema, with_parent=True) raise ercls( f'invalid target for {ptr_vn}: ' f'{str(ptr_target.get_displayname(ctx.env.schema))!r} ' f'(expecting {"" or "".join(expected)})' ) # Common code for computed/not computed if ptrcls and ptrcls in pending_pointers: schema = ctx.env.schema vnp = ptrcls.get_verbosename(schema, with_parent=True) raise errors.QueryError( f'duplicate definition of {vnp}', context=shape_el.context) if qlexpr is not None or ptrcls is None: src_scls: s_sources.Source if is_linkprop: # Proper checking was done when is_linkprop is defined. assert view_rptr is not None assert isinstance(view_rptr.ptrcls, s_links.Link) src_scls = view_rptr.ptrcls else: src_scls = view_scls if ptr_target.is_object_type(): base = ctx.env.get_track_schema_object( sn.QualName('std', 'link'), expr=None) else: base = ctx.env.get_track_schema_object( sn.QualName('std', 'property'), expr=None) if base_ptrcls is not None: derive_from = base_ptrcls else: derive_from = base derived_name = schemactx.derive_view_name( base_ptrcls, derived_name_base=ptr_name, derived_name_quals=[str(src_scls.get_name(ctx.env.schema))], ctx=ctx, ) existing = ctx.env.schema.get( derived_name, default=None, type=s_pointers.Pointer) if existing is not None: existing_target = existing.get_target(ctx.env.schema) assert existing_target is not None if ctx.recompiling_schema_alias: ptr_cardinality = existing.get_cardinality(ctx.env.schema) ptr_required = existing.get_required(ctx.env.schema) if ptr_target == existing_target: ptrcls = existing elif ptr_target.implicitly_castable_to( existing_target, ctx.env.schema): ctx.env.schema = existing.set_target( ctx.env.schema, ptr_target) ptrcls = existing else: vnp = existing.get_verbosename( ctx.env.schema, with_parent=True) t1_vn = existing_target.get_verbosename(ctx.env.schema) t2_vn = ptr_target.get_verbosename(ctx.env.schema) if compexpr is not None: source_context = compexpr.context else: source_context = shape_el.expr.steps[-1].context raise errors.SchemaError( f'cannot redefine {vnp} as {t2_vn}', details=f'{vnp} is defined as {t1_vn}', context=source_context, ) else: ptrcls = schemactx.derive_ptr( derive_from, src_scls, ptr_target, derive_backlink=is_inbound_alias, derived_name=derived_name, ctx=ctx) elif ptrcls.get_target(ctx.env.schema) != ptr_target: ctx.env.schema = ptrcls.set_target(ctx.env.schema, ptr_target) assert ptrcls is not None if materialized and is_mutation and any( x.is_binding == irast.BindingKind.With and x.expr and inference.infer_volatility( x.expr, ctx.env, for_materialization=True).is_volatile() for reason in materialized if isinstance(reason, irast.MaterializeVisible) for _, x in reason.sets ): raise errors.QueryError( f'cannot refer to volatile WITH bindings from DML', context=compexpr and compexpr.context, ) if materialized and not is_mutation and ctx.qlstmt: assert ptrcls not in ctx.env.materialized_sets ctx.env.materialized_sets[ptrcls] = 
ctx.qlstmt, materialized if irexpr: setgen.maybe_materialize(ptrcls, irexpr, ctx=ctx) if qlexpr is not None: ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'defined_here', True ) if qlexpr is not None: ctx.source_map[ptrcls] = irast.ComputableInfo( qlexpr=qlexpr, irexpr=irexpr, context=ctx, path_id=path_id, path_id_ns=path_id_namespace, shape_op=shape_el.operation.op, should_materialize=materialized or [], ) if compexpr is not None or is_polymorphic or materialized: if (old_ptrref := ctx.env.ptr_ref_cache.get(ptrcls)): old_ptrref.is_computable = True ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'computable', True, ) ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'owned', True, ) if ptr_cardinality is not None: ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'cardinality', ptr_cardinality) ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'required', ptr_required) else: if qlexpr is None and ptrcls is not base_ptrcls: ctx.env.pointer_derivation_map[base_ptrcls].append(ptrcls) base_cardinality = None base_required = None if base_ptrcls is not None and not base_ptrcls_is_alias: base_cardinality = _get_base_ptr_cardinality(base_ptrcls, ctx=ctx) base_required = base_ptrcls.get_required(ctx.env.schema) if base_cardinality is None or not base_cardinality.is_known(): # If the base cardinality is not known the we can't make # any checks here and will rely on validation in the # cardinality inferer. specified_cardinality = shape_el.cardinality specified_required = shape_el.required else: specified_cardinality = base_cardinality # Inferred optionality overrides that of the base pointer # if base pointer is not `required`, hence the is True check. if shape_el.required is not None: specified_required = shape_el.required elif base_required is True: specified_required = base_required else: specified_required = None if ( shape_el.cardinality is not None and base_ptrcls is not None and shape_el.cardinality != base_cardinality ): base_src = base_ptrcls.get_source(ctx.env.schema) assert base_src is not None base_src_name = base_src.get_verbosename(ctx.env.schema) raise errors.SchemaError( f'cannot redefine the cardinality of ' f'{ptrcls.get_verbosename(ctx.env.schema)}: ' f'it is defined as {base_cardinality.as_ptr_qual()!r} ' f'in the base {base_src_name}', context=compexpr and compexpr.context, ) if ( shape_el.required is False and base_ptrcls is not None and base_required ): base_src = base_ptrcls.get_source(ctx.env.schema) assert base_src is not None base_src_name = base_src.get_verbosename(ctx.env.schema) raise errors.SchemaError( f'cannot redefine ' f'{ptrcls.get_verbosename(ctx.env.schema)} ' f'as optional: it is defined as required ' f'in the base {base_src_name}', context=compexpr and compexpr.context, ) ctx.env.pointer_specified_info[ptrcls] = ( specified_cardinality, specified_required, shape_el.context) ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'cardinality', qltypes.SchemaCardinality.Unknown) # Prohibit update of readonly if exprtype.is_update() and ptrcls.get_readonly(ctx.env.schema): raise errors.QueryError( f'cannot update {ptrcls.get_verbosename(ctx.env.schema)}: ' f'it is declared as read-only', context=compexpr and compexpr.context, ) # Prohibit invalid operations on id and __type__ ptrcls_sn = ptrcls.get_shortname(ctx.env.schema) id_access = ( ptrcls_sn.name == 'id' and ( not ctx.env.options.allow_user_specified_id or not exprtype.is_mutation() ) ) if ( (compexpr is not None or is_polymorphic) and (id_access or 
ptrcls.is_protected_pointer(ctx.env.schema)) and not from_default ): if is_polymorphic: msg = (f'cannot access {ptrcls_sn.name} on a polymorphic ' f'shape element') else: msg = f'cannot assign to {ptrcls_sn.name}' if id_access and not ctx.env.options.allow_user_specified_id: hint = 'config setting allow_user_specified_id must be enabled' else: hint = None raise errors.QueryError(msg, context=shape_el.context, hint=hint) return ptrcls, irexpr ","def _normalize_view_ptr_expr( ir_source: irast.Set, shape_el: qlast.ShapeElement, view_scls: s_objtypes.ObjectType, *, path_id: irast.PathId, path_id_namespace: Optional[irast.Namespace]=None, exprtype: s_types.ExprType = s_types.ExprType.Select, from_default: bool=False, view_rptr: Optional[context.ViewRPtr]=None, pending_pointers: Collection[s_pointers.Pointer]=(), ctx: context.ContextLevel) -> Tuple[ s_pointers.Pointer, Optional[irast.Set]]: steps = shape_el.expr.steps is_linkprop = False is_polymorphic = False is_mutation = exprtype.is_insert() or exprtype.is_update() materialized = None # Pointers may be qualified by the explicit source # class, which is equivalent to Expr[IS Type]. plen = len(steps) ptrsource: s_sources.Source = view_scls qlexpr: Optional[qlast.Expr] = None target_typexpr = None source = [] base_ptrcls_is_alias = False irexpr = None if plen >= 2 and isinstance(steps[-1], qlast.TypeIntersection): # Target type intersection: foo: Type target_typexpr = steps[-1].type plen -= 1 steps = steps[:-1] if plen == 1: # regular shape lexpr = steps[0] assert isinstance(lexpr, qlast.Ptr) is_linkprop = lexpr.type == 'property' if is_linkprop: if view_rptr is None or view_rptr.ptrcls is None: raise errors.QueryError( 'invalid reference to link property ' 'in top level shape', context=lexpr.context) assert isinstance(view_rptr.ptrcls, s_links.Link) ptrsource = view_rptr.ptrcls elif plen == 2 and isinstance(steps[0], qlast.TypeIntersection): # Source type intersection: [IS Type].foo source = [steps[0]] lexpr = steps[1] ptype = steps[0].type if not isinstance(ptype, qlast.TypeName): raise errors.QueryError( 'complex type expressions are not supported here', context=ptype.context, ) source_spec = schemactx.get_schema_type(ptype.maintype, ctx=ctx) if not isinstance(source_spec, s_objtypes.ObjectType): raise errors.QueryError( f'expected object type, got ' f'{source_spec.get_verbosename(ctx.env.schema)}', context=ptype.context, ) ptrsource = source_spec is_polymorphic = True else: # pragma: no cover raise RuntimeError( f'unexpected path length in view shape: {len(steps)}') assert isinstance(lexpr, qlast.Ptr) ptrname = lexpr.ptr.name compexpr: Optional[qlast.Expr] = shape_el.compexpr if compexpr is None and is_mutation: raise errors.QueryError( ""mutation queries must specify values with ':='"", context=steps[-1].context, ) ptrcls: Optional[s_pointers.Pointer] if compexpr is None: ptrcls = setgen.resolve_ptr( ptrsource, ptrname, track_ref=lexpr, ctx=ctx, source_context=shape_el.context) if is_polymorphic: ptrcls = schemactx.derive_ptr( ptrcls, view_scls, ctx=ctx) base_ptrcls = ptrcls.get_bases(ctx.env.schema).first(ctx.env.schema) base_ptr_is_computable = base_ptrcls in ctx.source_map ptr_name = sn.QualName( module='__', name=ptrcls.get_shortname(ctx.env.schema).name, ) # Schema computables that point to opaque unions will just have # BaseObject as their target, but in order to properly compile # it, we need to know the actual type here, so we recompute it. 
# XXX: This is a hack, though, and hopefully we can fix it once # the computable/alias rework lands. is_opaque_schema_computable = ( ptrcls.is_pure_computable(ctx.env.schema) and (t := ptrcls.get_target(ctx.env.schema)) and t.get_name(ctx.env.schema) == sn.QualName('std', 'BaseObject') ) base_required = base_ptrcls.get_required(ctx.env.schema) base_cardinality = _get_base_ptr_cardinality(base_ptrcls, ctx=ctx) base_is_singleton = False if base_cardinality is not None and base_cardinality.is_known(): base_is_singleton = base_cardinality.is_single() is_nontrivial = astutils.is_nontrivial_shape_element(shape_el) is_obj = not_none(ptrcls.get_target(ctx.env.schema)).is_object_type() if ( is_obj or is_nontrivial or shape_el.elements or base_ptr_is_computable or is_polymorphic or target_typexpr is not None or (ctx.implicit_limit and not base_is_singleton) or is_opaque_schema_computable ): if target_typexpr is None: qlexpr = qlast.Path(steps=[*source, lexpr], partial=True) else: qlexpr = qlast.Path(steps=[ *source, lexpr, qlast.TypeIntersection(type=target_typexpr), ], partial=True) if shape_el.elements: qlexpr = qlast.Shape(expr=qlexpr, elements=shape_el.elements) qlexpr = astutils.ensure_qlstmt(qlexpr) assert isinstance(qlexpr, qlast.SelectQuery) qlexpr.where = shape_el.where qlexpr.orderby = shape_el.orderby if shape_el.offset or shape_el.limit: qlexpr = qlast.SelectQuery(result=qlexpr, implicit=True) qlexpr.offset = shape_el.offset qlexpr.limit = shape_el.limit if ( (ctx.expr_exposed or ctx.stmt is ctx.toplevel_stmt) and not qlexpr.limit and ctx.implicit_limit and not base_is_singleton ): qlexpr = qlast.SelectQuery(result=qlexpr, implicit=True) qlexpr.limit = qlast.IntegerConstant( value=str(ctx.implicit_limit), ) if target_typexpr is not None: assert isinstance(target_typexpr, qlast.TypeName) intersector_type = schemactx.get_schema_type( target_typexpr.maintype, ctx=ctx) int_result = schemactx.apply_intersection( ptrcls.get_target(ctx.env.schema), # type: ignore intersector_type, ctx=ctx, ) ptr_target = int_result.stype else: _ptr_target = ptrcls.get_target(ctx.env.schema) assert _ptr_target ptr_target = _ptr_target ptr_required = base_required ptr_cardinality = base_cardinality if shape_el.where: # If the shape has a filter on it, we need to force a reinference # of the cardinality, to produce an error if needed. ptr_cardinality = None if ptr_cardinality is None or not ptr_cardinality.is_known(): # We do not know the parent's pointer cardinality yet. ctx.env.pointer_derivation_map[base_ptrcls].append(ptrcls) ctx.env.pointer_specified_info[ptrcls] = ( shape_el.cardinality, shape_el.required, shape_el.context) # If we generated qlexpr for the element, we process the # subview by just compiling the qlexpr. This is so that we can # figure out if it needs materialization and also so that # `qlexpr is not None` always implies that we did the # compilation. if qlexpr: qlptrcls = ptrcls qlptrsource = ptrsource irexpr, _ = _compile_qlexpr( ir_source, qlexpr, view_scls, ptrcls=qlptrcls, ptrsource=qlptrsource, path_id=path_id, ptr_name=ptr_name, is_linkprop=is_linkprop, exprtype=exprtype, ctx=ctx) materialized = setgen.should_materialize( irexpr, ptrcls=ptrcls, materialize_visible=True, skipped_bindings={path_id}, ctx=ctx) ptr_target = inference.infer_type(irexpr, ctx.env) # compexpr is not None else: base_ptrcls = ptrcls = None if (is_mutation and ptrname not in ctx.special_computables_in_mutation_shape): # If this is a mutation, the pointer must exist. 
ptrcls = setgen.resolve_ptr( ptrsource, ptrname, track_ref=lexpr, ctx=ctx) if ptrcls.is_pure_computable(ctx.env.schema): ptr_vn = ptrcls.get_verbosename(ctx.env.schema, with_parent=True) raise errors.QueryError( f'modification of computed {ptr_vn} is prohibited', context=shape_el.context) base_ptrcls = ptrcls.get_bases( ctx.env.schema).first(ctx.env.schema) ptr_name = sn.QualName( module='__', name=ptrcls.get_shortname(ctx.env.schema).name, ) else: ptr_name = sn.QualName( module='__', name=ptrname, ) try: ptrcls = setgen.resolve_ptr( ptrsource, ptrname, track_ref=False, ctx=ctx, ) base_ptrcls = ptrcls.get_bases( ctx.env.schema).first(ctx.env.schema) except errors.InvalidReferenceError: # This is a NEW computable pointer, it's fine. pass qlexpr = astutils.ensure_qlstmt(compexpr) if ((ctx.expr_exposed or ctx.stmt is ctx.toplevel_stmt) and ctx.implicit_limit and isinstance(qlexpr, qlast.OffsetLimitMixin) and not qlexpr.limit): qlexpr = qlast.SelectQuery(result=qlexpr, implicit=True) qlexpr.limit = qlast.IntegerConstant(value=str(ctx.implicit_limit)) irexpr, sub_view_rptr = _compile_qlexpr( ir_source, qlexpr, view_scls, ptrcls=ptrcls, ptrsource=ptrsource, path_id=path_id, ptr_name=ptr_name, is_linkprop=is_linkprop, exprtype=exprtype, ctx=ctx) materialized = setgen.should_materialize( irexpr, ptrcls=ptrcls, materialize_visible=True, skipped_bindings={path_id}, ctx=ctx) ptr_target = inference.infer_type(irexpr, ctx.env) if ( shape_el.operation.op is qlast.ShapeOp.APPEND or shape_el.operation.op is qlast.ShapeOp.SUBTRACT ): if not exprtype.is_update(): op = ( '+=' if shape_el.operation.op is qlast.ShapeOp.APPEND else '-=' ) raise errors.EdgeQLSyntaxError( f""unexpected '{op}'"", context=shape_el.operation.context, ) irexpr.context = compexpr.context is_inbound_alias = False if base_ptrcls is None: base_ptrcls = sub_view_rptr.base_ptrcls base_ptrcls_is_alias = sub_view_rptr.ptrcls_is_alias is_inbound_alias = ( sub_view_rptr.rptr_dir is s_pointers.PointerDirection.Inbound) if ptrcls is not None: ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'owned', True) ptr_cardinality = None ptr_required = False if ( isinstance(ptr_target, s_types.Collection) and not ctx.env.orig_schema.get_by_id(ptr_target.id, default=None) ): # Record references to implicitly defined collection types, # so that the alias delta machinery can pick them up. ctx.env.created_schema_objects.add(ptr_target) anytype = ptr_target.find_any(ctx.env.schema) if anytype is not None: raise errors.QueryError( 'expression returns value of indeterminate type', context=ctx.env.type_origins.get(anytype), ) # Validate that the insert/update expression is # of the correct class. if is_mutation and ptrcls is not None: base_target = ptrcls.get_target(ctx.env.schema) assert base_target is not None if ptr_target.assignment_castable_to( base_target, schema=ctx.env.schema): # Force assignment casts if the target type is not a # subclass of the base type and the cast is not to an # object type. if not ( base_target.is_object_type() or s_types.is_type_compatible( base_target, ptr_target, schema=ctx.env.schema ) ): qlexpr = astutils.ensure_qlstmt(qlast.TypeCast( type=typegen.type_to_ql_typeref(base_target, ctx=ctx), expr=compexpr, )) ptr_target = base_target # We also need to compile the cast to IR. 
with ctx.new() as subctx: subctx.anchors = subctx.anchors.copy() source_path = subctx.create_anchor(irexpr, 'a') cast_qlexpr = astutils.ensure_qlstmt(qlast.TypeCast( type=typegen.type_to_ql_typeref( base_target, ctx=ctx), expr=source_path, )) old_rptr = irexpr.rptr irexpr.rptr = None irexpr = dispatch.compile(cast_qlexpr, ctx=subctx) irexpr.rptr = old_rptr else: expected = [ repr(str(base_target.get_displayname(ctx.env.schema))) ] ercls: Type[errors.EdgeDBError] if ptrcls.is_property(ctx.env.schema): ercls = errors.InvalidPropertyTargetError else: ercls = errors.InvalidLinkTargetError ptr_vn = ptrcls.get_verbosename(ctx.env.schema, with_parent=True) raise ercls( f'invalid target for {ptr_vn}: ' f'{str(ptr_target.get_displayname(ctx.env.schema))!r} ' f'(expecting {"" or "".join(expected)})' ) # Common code for computed/not computed if ptrcls and ptrcls in pending_pointers: schema = ctx.env.schema vnp = ptrcls.get_verbosename(schema, with_parent=True) raise errors.QueryError( f'duplicate definition of {vnp}', context=shape_el.context) if qlexpr is not None or ptrcls is None: src_scls: s_sources.Source if is_linkprop: # Proper checking was done when is_linkprop is defined. assert view_rptr is not None assert isinstance(view_rptr.ptrcls, s_links.Link) src_scls = view_rptr.ptrcls else: src_scls = view_scls if ptr_target.is_object_type(): base = ctx.env.get_track_schema_object( sn.QualName('std', 'link'), expr=None) else: base = ctx.env.get_track_schema_object( sn.QualName('std', 'property'), expr=None) if base_ptrcls is not None: derive_from = base_ptrcls else: derive_from = base derived_name = schemactx.derive_view_name( base_ptrcls, derived_name_base=ptr_name, derived_name_quals=[str(src_scls.get_name(ctx.env.schema))], ctx=ctx, ) existing = ctx.env.schema.get( derived_name, default=None, type=s_pointers.Pointer) if existing is not None: existing_target = existing.get_target(ctx.env.schema) assert existing_target is not None if ctx.recompiling_schema_alias: ptr_cardinality = existing.get_cardinality(ctx.env.schema) ptr_required = existing.get_required(ctx.env.schema) if ptr_target == existing_target: ptrcls = existing elif ptr_target.implicitly_castable_to( existing_target, ctx.env.schema): ctx.env.schema = existing.set_target( ctx.env.schema, ptr_target) ptrcls = existing else: vnp = existing.get_verbosename( ctx.env.schema, with_parent=True) t1_vn = existing_target.get_verbosename(ctx.env.schema) t2_vn = ptr_target.get_verbosename(ctx.env.schema) if compexpr is not None: source_context = compexpr.context else: source_context = shape_el.expr.steps[-1].context raise errors.SchemaError( f'cannot redefine {vnp} as {t2_vn}', details=f'{vnp} is defined as {t1_vn}', context=source_context, ) else: ptrcls = schemactx.derive_ptr( derive_from, src_scls, ptr_target, derive_backlink=is_inbound_alias, derived_name=derived_name, ctx=ctx) elif ptrcls.get_target(ctx.env.schema) != ptr_target: ctx.env.schema = ptrcls.set_target(ctx.env.schema, ptr_target) assert ptrcls is not None if materialized and is_mutation and any( x.is_binding == irast.BindingKind.With and x.expr and inference.infer_volatility( x.expr, ctx.env, for_materialization=True).is_volatile() for reason in materialized if isinstance(reason, irast.MaterializeVisible) for _, x in reason.sets ): raise errors.QueryError( f'cannot refer to volatile WITH bindings from DML', context=compexpr and compexpr.context, ) if materialized and not is_mutation and ctx.qlstmt: assert ptrcls not in ctx.env.materialized_sets ctx.env.materialized_sets[ptrcls] = 
ctx.qlstmt, materialized if irexpr: setgen.maybe_materialize(ptrcls, irexpr, ctx=ctx) if qlexpr is not None: ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'defined_here', True ) if qlexpr is not None: ctx.source_map[ptrcls] = irast.ComputableInfo( qlexpr=qlexpr, irexpr=irexpr, context=ctx, path_id=path_id, path_id_ns=path_id_namespace, shape_op=shape_el.operation.op, should_materialize=materialized or [], ) if compexpr is not None or is_polymorphic or materialized: if (old_ptrref := ctx.env.ptr_ref_cache.get(ptrcls)): old_ptrref.is_computable = True ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'computable', True, ) ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'owned', True, ) if ptr_cardinality is not None: ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'cardinality', ptr_cardinality) ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'required', ptr_required) else: if qlexpr is None and ptrcls is not base_ptrcls: ctx.env.pointer_derivation_map[base_ptrcls].append(ptrcls) base_cardinality = None base_required = None if base_ptrcls is not None and not base_ptrcls_is_alias: base_cardinality = _get_base_ptr_cardinality(base_ptrcls, ctx=ctx) base_required = base_ptrcls.get_required(ctx.env.schema) if base_cardinality is None or not base_cardinality.is_known(): # If the base cardinality is not known the we can't make # any checks here and will rely on validation in the # cardinality inferer. specified_cardinality = shape_el.cardinality specified_required = shape_el.required else: specified_cardinality = base_cardinality # Inferred optionality overrides that of the base pointer # if base pointer is not `required`, hence the is True check. if shape_el.required is not None: specified_required = shape_el.required elif base_required is True: specified_required = base_required else: specified_required = None if ( shape_el.cardinality is not None and base_ptrcls is not None and shape_el.cardinality != base_cardinality ): base_src = base_ptrcls.get_source(ctx.env.schema) assert base_src is not None base_src_name = base_src.get_verbosename(ctx.env.schema) raise errors.SchemaError( f'cannot redefine the cardinality of ' f'{ptrcls.get_verbosename(ctx.env.schema)}: ' f'it is defined as {base_cardinality.as_ptr_qual()!r} ' f'in the base {base_src_name}', context=compexpr and compexpr.context, ) if ( shape_el.required is False and base_ptrcls is not None and base_required ): base_src = base_ptrcls.get_source(ctx.env.schema) assert base_src is not None base_src_name = base_src.get_verbosename(ctx.env.schema) raise errors.SchemaError( f'cannot redefine ' f'{ptrcls.get_verbosename(ctx.env.schema)} ' f'as optional: it is defined as required ' f'in the base {base_src_name}', context=compexpr and compexpr.context, ) ctx.env.pointer_specified_info[ptrcls] = ( specified_cardinality, specified_required, shape_el.context) ctx.env.schema = ptrcls.set_field_value( ctx.env.schema, 'cardinality', qltypes.SchemaCardinality.Unknown) # Prohibit update of readonly if exprtype.is_update() and ptrcls.get_readonly(ctx.env.schema): raise errors.QueryError( f'cannot update {ptrcls.get_verbosename(ctx.env.schema)}: ' f'it is declared as read-only', context=compexpr and compexpr.context, ) # Prohibit invalid operations on id and __type__ ptrcls_sn = ptrcls.get_shortname(ctx.env.schema) id_access = ( ptrcls_sn.name == 'id' and ( not ctx.env.options.allow_user_specified_id or not exprtype.is_mutation() ) ) if ( (compexpr is not None or is_polymorphic) and (id_access or 
ptrcls.is_protected_pointer(ctx.env.schema)) and not from_default ): if is_polymorphic: msg = (f'cannot access {ptrcls_sn.name} on a polymorphic ' f'shape element') else: msg = f'cannot assign to {ptrcls_sn.name}' if id_access and not ctx.env.options.allow_user_specified_id: hint = 'consider enabling the ""allow_user_specified_id"" configuration parameter to allow setting custom object ids' else: hint = None raise errors.QueryError(msg, context=shape_el.context, hint=hint) return ptrcls, irexpr " 32158,"def fetch_incidents(client: Client): max_results = arg_to_number(arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False) first_fetch_time = arg_to_datetime(demisto.params().get('first_fetch')).isoformat() last_run = demisto.getLastRun() last_fetch = last_run.get('last_fetch', first_fetch_time) incidentsList=[] alert_response = client.correlation_alerts() incident_data = alert_response['Data'] for inc in incident_data: if len(incidentsList) > max_results: break incident_name = inc['CorrelationAlert']['NAME'] time_stamp = inc['CorrelationAlert']['CREATEDATE']+""Z"" severityLvl = int(inc['CorrelationAlert']['RISK']) if severityLvl >=0 and severityLvl <= 5: severity = 1 elif severityLvl > 5 and severityLvl <= 7: severity = 2 elif severityLvl > 7 and severityLvl <= 9: severity = 3 elif severityLvl > 9 and severityLvl <= 10: severity = 4 else: severity = 0 # ""log"" column is stringfyed 'Log' data. inc['Log'].pop(""log"") incidentObject = {**inc['Log'], **inc['CorrelationAlert']} incident = { 'name': incident_name, 'occurred': time_stamp, 'rawJSON': json.dumps(incidentObject), ""severity"": severity, 'type': 'Crpyotsim CorrelationAlert' } incidentsList.append(incident) # Save the next_run as a dict with the last_fetch key to be stored next_run = {'last_fetch': last_fetch} return next_run, incidentsList ","def fetch_incidents(client: Client): max_results = arg_to_number(arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False) first_fetch_time = arg_to_datetime(demisto.params().get('first_fetch')).isoformat() last_run = demisto.getLastRun() last_fetch = last_run.get('last_fetch', first_fetch_time) incidentsList=[] alert_response = client.correlation_alerts() incident_data = alert_response['Data'] for inc in incident_data: if len(incidentsList) > max_results: break incident_name = inc['CorrelationAlert']['NAME'] time_stamp = inc['CorrelationAlert']['CREATEDATE']+""Z"" severityLvl = int(inc['CorrelationAlert']['RISK']) if severityLvl >=0 and severityLvl <= 5: severity = 1 elif severityLvl > 5 and severityLvl <= 7: severity = 2 elif severityLvl > 7 and severityLvl <= 9: severity = 3 elif severityLvl > 9 and severityLvl <= 10: severity = 4 else: severity = 0 # ""log"" column is stringfyed 'Log' data. inc['Log'].pop(""log"") incident_object = {**inc['Log'], **inc['CorrelationAlert']} incident = { 'name': incident_name, 'occurred': time_stamp, 'rawJSON': json.dumps(incidentObject), ""severity"": severity, 'type': 'Crpyotsim CorrelationAlert' } incidentsList.append(incident) # Save the next_run as a dict with the last_fetch key to be stored next_run = {'last_fetch': last_fetch} return next_run, incidentsList " 33131,"def l96s_tay2_step(x, t, dt, s): """"""Steps forward state of L96s model by order 2.0 Taylor scheme This is the method that must be used to generate the truth twin for this model due to the high-accuracy with respect to convergence in the strong sense. 
The ensemble model twin will be generated by the general integration functionality, with the diffusion set appropriately. This is the basic formulation which makes a Fourier truncation at p=1 for the simple form of the order 2.0 method. See `bib.grudzien2020numerical` for full details of the scheme and other versions."""""" # Infer system dimension sys_dim = len(x) # Compute the deterministic dxdt and the jacobian equations dx = dxdt(x) dxF = d2x_dtdx(x) # coefficients defined based on the p=1 Fourier truncation rho = 1.0/12.0 - 0.5 * np.pi**(-2) alpha = np.pi**2 / 180.0 - 0.5 * np.pi**(-2) # draw standard normal sample to define the # recursive Stratonovich integral coefficients rndm = np.random.standard_normal([5, sys_dim]) xi = rndm[0, :] mu = rndm[1, :] phi = rndm[2, :] zeta = rndm[3, :] eta = rndm[4, :] # define the auxiliary functions of random Fourier coefficients, a and b a = -2.0 * np.sqrt(dt * rho) * mu - np.sqrt(2.0*dt) * zeta / np.pi b = np.sqrt(dt * alpha) * phi + np.sqrt(dt / (2.0 * np.pi**2) ) * eta # vector of first order Stratonovich integrals J_pdelta = (dt/2.0) * (np.sqrt(dt) * xi + a) def Psi(l1, l2): # psi will be a generic function of the indicies l1 and l2, we will define # psi plus and psi minus via this psi = dt**2 * xi[l1] * xi[l2] / 3.0 + dt * a[l1] * a[l2] / 2.0 \ + dt**(1.5) * (xi[l1] * a[l2] + xi[l2] * a[l1]) / 4.0 \ - dt**(1.5) * (xi[l1] * b[l2] + xi[l2] * b[l1]) / (2.0 * np.pi) return psi # we define the approximations of the second order Stratonovich integral psi_plus = np.array([Psi((i-1) % sys_dim, (i+1) % sys_dim) for i in range(sys_dim)]) psi_minus = np.array([Psi((i-2) % sys_dim, (i-1) % sys_dim) for i in range(sys_dim)]) # the final vectorized step forward is given as x = x + dx * dt + dt**2 * 0.5 * dxF @ dx # deterministic taylor step x += s * np.sqrt(dt) * xi # stochastic euler step x += s * dxF @ J_pdelta # stochastic first order taylor step x += s**2 * (psi_plus - psi_minus) # stochastic second order taylor step return x ","def l96s_tay2_step(x, t, dt, s): """"""Steps forward state of L96s model by order 2.0 Taylor scheme This is the method that must be used to generate the truth twin for this model due to the high-accuracy with respect to convergence in the strong sense. The ensemble model twin will be generated by the general integration functionality, with the diffusion set appropriately. This is the basic formulation which makes a Fourier truncation at p=1 for the simple form of the order 2.0 method. 
See `bib.grudzien2020numerical` for full details of the scheme and other versions."""""" # Infer system dimension sys_dim = len(x) # Compute the deterministic dxdt and the jacobian equations dx = dxdt(x) dxF = d2x_dtdx(x) # coefficients defined based on the p=1 Fourier truncation rho = 1.0/12.0 - 0.5 * np.pi**(-2) alpha = np.pi**2 / 180.0 - 0.5 * np.pi**(-2) # draw standard normal sample to define the # recursive Stratonovich integral coefficients rndm = np.random.standard_normal([5, sys_dim]) xi, mu, phi, zeta, eta = rndm # define the auxiliary functions of random Fourier coefficients, a and b a = -2.0 * np.sqrt(dt * rho) * mu - np.sqrt(2.0*dt) * zeta / np.pi b = np.sqrt(dt * alpha) * phi + np.sqrt(dt / (2.0 * np.pi**2) ) * eta # vector of first order Stratonovich integrals J_pdelta = (dt/2.0) * (np.sqrt(dt) * xi + a) def Psi(l1, l2): # psi will be a generic function of the indicies l1 and l2, we will define # psi plus and psi minus via this psi = dt**2 * xi[l1] * xi[l2] / 3.0 + dt * a[l1] * a[l2] / 2.0 \ + dt**(1.5) * (xi[l1] * a[l2] + xi[l2] * a[l1]) / 4.0 \ - dt**(1.5) * (xi[l1] * b[l2] + xi[l2] * b[l1]) / (2.0 * np.pi) return psi # we define the approximations of the second order Stratonovich integral psi_plus = np.array([Psi((i-1) % sys_dim, (i+1) % sys_dim) for i in range(sys_dim)]) psi_minus = np.array([Psi((i-2) % sys_dim, (i-1) % sys_dim) for i in range(sys_dim)]) # the final vectorized step forward is given as x = x + dx * dt + dt**2 * 0.5 * dxF @ dx # deterministic taylor step x += s * np.sqrt(dt) * xi # stochastic euler step x += s * dxF @ J_pdelta # stochastic first order taylor step x += s**2 * (psi_plus - psi_minus) # stochastic second order taylor step return x " 34925,"def check_numerical_grads(function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1, acceptable_fail_percentage=None): """"""A helper function that checks that numerical gradients of a function are equal to gradients computed in some different way (analytical gradients). Numerical gradients are computed using finite difference approximation. To reduce the number of function evaluations, the number of points used is gradually increased if the error value is too high (up to 5 points). Parameters ---------- function A function that takes inputs either as positional or as keyword arguments (either `function(*input_values)` or `function(**input_values)` should be correct) and returns a scalar result. Should accept numpy ndarrays. input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray] A list of values or a dict assigning values to variables. Represents the point at which gradients should be computed. grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray] Gradients computed using a different method. function_value : float, optional Should be equal to `function(**input_values)`. delta : float, optional A small number used for numerical computation of partial derivatives. The default 1e-3 is a good choice for float32. atol : float, optional Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a gradient. rtol : float, optional Relative tolerance. acceptable_fail_percentage : float, optional If not None, raise an error only when the fraction of wrong elements for a gradient is higher than this value. 
"""""" # If input_values is a list then function accepts positional arguments # In this case transform it to a function taking kwargs of the form {""0"": ..., ""1"": ...} if not isinstance(input_values, dict): input_len = len(input_values) input_values = {str(idx): val for idx, val in enumerate(input_values)} def _function(_input_len=input_len, _orig_function=function, **kwargs): return _orig_function(*(kwargs[str(i)] for i in range(input_len))) function = _function grad_values = {str(idx): val for idx, val in enumerate(grad_values)} if function_value is None: function_value = function(**input_values) # a helper to modify j-th element of val by a_delta def modify(val, j, a_delta): val = val.copy() val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta return val # numerically compute a partial derivative with respect to j-th element of the var `name` def derivative(x_name, j, a_delta): modified_values = {n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()} return (function(**modified_values) - function_value)/a_delta def compare_derivative(j, n_der, grad): der = grad.reshape(-1)[j] return np.abs(n_der - der) < atol + rtol*np.abs(n_der) for x_name, grad in grad_values.items(): if grad.shape != input_values[x_name].shape: raise AssertionError( ""Gradient wrt '{}' has unexpected shape {}, expected {} "" .format(x_name, grad.shape, input_values[x_name].shape)) ngrad = np.zeros_like(grad) wrong_positions = [] # compute partial derivatives for each position in this variable for j in range(np.prod(grad.shape)): # forward difference approximation nder = derivative(x_name, j, delta) # if the derivative is not equal to the analytical one, try to use more # precise and expensive methods if not compare_derivative(j, nder, grad): # central difference approximation nder = (derivative(x_name, j, -delta) + nder)/2 if not compare_derivative(j, nder, grad): # central difference approximation using h = delta/2 cnder2 = (derivative(x_name, j, delta/2) + derivative(x_name, j, -delta/2))/2 # five-point derivative nder = (4*cnder2 - nder)/3 # if the derivatives still don't match, add this position to the # list of wrong positions if not compare_derivative(j, nder, grad): wrong_positions.append(np.unravel_index(j, grad.shape)) ngrad.reshape(-1)[j] = nder wrong_percentage = len(wrong_positions)/np.prod(grad.shape) dist = np.sqrt(np.sum((ngrad - grad)**2)) grad_norm = np.sqrt(np.sum(ngrad**2)) if not (np.isfinite(dist) and np.isfinite(grad_norm)): raise ValueError( ""NaN or infinity detected during numerical gradient checking wrt '{}'\n"" ""analytical grad = {}\n numerical grad = {}\n"" .format(x_name, grad, ngrad)) # we multiply atol by this number to make it more universal for different sizes sqrt_n = np.sqrt(float(np.prod(grad.shape))) if dist > atol*sqrt_n + rtol*grad_norm: enough_failures = (acceptable_fail_percentage is None or wrong_percentage > acceptable_fail_percentage) if enough_failures: raise AssertionError( ""Analytical and numerical grads wrt '{}' differ too much\n"" ""analytical grad = {}\n numerical grad = {}\n"" ""{}% of elements differ, first 10 of wrong positions: {}\n"" ""distance > atol*sqrt(n) + rtol*grad_norm\n"" ""distance {} > {}*{} + {}*{}"" .format(x_name, grad, ngrad, int(100*wrong_percentage), wrong_positions[:10], dist, atol, sqrt_n, rtol, grad_norm)) else: logging.warning(""Analytical and numerical grads wrt '%s' differ, however "" ""there were not enough wrong elements to raise an error "" ""(only %d%%)"", x_name, int(100*wrong_percentage)) max_diff = 
np.max(np.abs(ngrad - grad)) avg_diff = np.mean(np.abs(ngrad - grad)) logging.info(""Numerical grad test wrt '%s' of shape %s passes, "" ""dist = %f, max_diff = %f, avg_diff = %f"", x_name, grad.shape, dist, max_diff, avg_diff) ","def check_numerical_grads(function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1, acceptable_fail_percentage=None): """"""A helper function that checks that numerical gradients of a function are equal to gradients computed in some different way (analytical gradients). Numerical gradients are computed using finite difference approximation. To reduce the number of function evaluations, the number of points used is gradually increased if the error value is too high (up to 5 points). Parameters ---------- function A function that takes inputs either as positional or as keyword arguments (either `function(*input_values)` or `function(**input_values)` should be correct) and returns a scalar result. Should accept numpy ndarrays. input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray] A list of values or a dict assigning values to variables. Represents the point at which gradients should be computed. grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray] Gradients computed using a different method. function_value : float, optional Should be equal to `function(**input_values)`. delta : float, optional A small number used for numerical computation of partial derivatives. The default 1e-3 is a good choice for float32. atol : float, optional Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a gradient. rtol : float, optional Relative tolerance. acceptable_fail_percentage : Optional[float] If not None, raise an error only when the fraction of wrong elements for a gradient is higher than this value. 
"""""" # If input_values is a list then function accepts positional arguments # In this case transform it to a function taking kwargs of the form {""0"": ..., ""1"": ...} if not isinstance(input_values, dict): input_len = len(input_values) input_values = {str(idx): val for idx, val in enumerate(input_values)} def _function(_input_len=input_len, _orig_function=function, **kwargs): return _orig_function(*(kwargs[str(i)] for i in range(input_len))) function = _function grad_values = {str(idx): val for idx, val in enumerate(grad_values)} if function_value is None: function_value = function(**input_values) # a helper to modify j-th element of val by a_delta def modify(val, j, a_delta): val = val.copy() val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta return val # numerically compute a partial derivative with respect to j-th element of the var `name` def derivative(x_name, j, a_delta): modified_values = {n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()} return (function(**modified_values) - function_value)/a_delta def compare_derivative(j, n_der, grad): der = grad.reshape(-1)[j] return np.abs(n_der - der) < atol + rtol*np.abs(n_der) for x_name, grad in grad_values.items(): if grad.shape != input_values[x_name].shape: raise AssertionError( ""Gradient wrt '{}' has unexpected shape {}, expected {} "" .format(x_name, grad.shape, input_values[x_name].shape)) ngrad = np.zeros_like(grad) wrong_positions = [] # compute partial derivatives for each position in this variable for j in range(np.prod(grad.shape)): # forward difference approximation nder = derivative(x_name, j, delta) # if the derivative is not equal to the analytical one, try to use more # precise and expensive methods if not compare_derivative(j, nder, grad): # central difference approximation nder = (derivative(x_name, j, -delta) + nder)/2 if not compare_derivative(j, nder, grad): # central difference approximation using h = delta/2 cnder2 = (derivative(x_name, j, delta/2) + derivative(x_name, j, -delta/2))/2 # five-point derivative nder = (4*cnder2 - nder)/3 # if the derivatives still don't match, add this position to the # list of wrong positions if not compare_derivative(j, nder, grad): wrong_positions.append(np.unravel_index(j, grad.shape)) ngrad.reshape(-1)[j] = nder wrong_percentage = len(wrong_positions)/np.prod(grad.shape) dist = np.sqrt(np.sum((ngrad - grad)**2)) grad_norm = np.sqrt(np.sum(ngrad**2)) if not (np.isfinite(dist) and np.isfinite(grad_norm)): raise ValueError( ""NaN or infinity detected during numerical gradient checking wrt '{}'\n"" ""analytical grad = {}\n numerical grad = {}\n"" .format(x_name, grad, ngrad)) # we multiply atol by this number to make it more universal for different sizes sqrt_n = np.sqrt(float(np.prod(grad.shape))) if dist > atol*sqrt_n + rtol*grad_norm: enough_failures = (acceptable_fail_percentage is None or wrong_percentage > acceptable_fail_percentage) if enough_failures: raise AssertionError( ""Analytical and numerical grads wrt '{}' differ too much\n"" ""analytical grad = {}\n numerical grad = {}\n"" ""{}% of elements differ, first 10 of wrong positions: {}\n"" ""distance > atol*sqrt(n) + rtol*grad_norm\n"" ""distance {} > {}*{} + {}*{}"" .format(x_name, grad, ngrad, int(100*wrong_percentage), wrong_positions[:10], dist, atol, sqrt_n, rtol, grad_norm)) else: logging.warning(""Analytical and numerical grads wrt '%s' differ, however "" ""there were not enough wrong elements to raise an error "" ""(only %d%%)"", x_name, int(100*wrong_percentage)) max_diff = 
np.max(np.abs(ngrad - grad)) avg_diff = np.mean(np.abs(ngrad - grad)) logging.info(""Numerical grad test wrt '%s' of shape %s passes, "" ""dist = %f, max_diff = %f, avg_diff = %f"", x_name, grad.shape, dist, max_diff, avg_diff) " 11548,"def _filtered_annotations(session, ids): annotations = ( _eager_loaded_annotations(session) .execution_options(stream_results=True) .filter(_annotation_filter()) .filter(models.Annotation.id.in_(ids)) ) for annotation in annotations: yield annotation ","def _filtered_annotations(session, ids): annotations = ( _eager_loaded_annotations(session) .execution_options(stream_results=True) .filter(_annotation_filter()) .filter(models.Annotation.id.in_(ids)) ) yield from annotations " 14194,"def _initialise_testbench(argv_): """"""Initialize testbench. This function is called after the simulator has elaborated all entities and is ready to run the test. The test must be defined by the environment variables :envvar:`MODULE` and :envvar:`TESTCASE`. The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a comma-separated list of modules to be executed before the first test. """""" _rlock.acquire() if ""COCOTB_LIBRARY_COVERAGE"" in os.environ: import coverage global _library_coverage _library_coverage = coverage.coverage( data_file="".coverage.cocotb"", branch=True, include=[""{}/*"".format(os.path.dirname(__file__))]) _library_coverage.start() global argc, argv argv = argv_ argc = len(argv) root_name = os.getenv(""TOPLEVEL"") if root_name is not None: if root_name == """": root_name = None elif '.' in root_name: # Skip any library component of the toplevel root_name = root_name.split(""."", 1)[1] # sys.path normally includes """" (the current directory), but does not appear to when python is embedded. # Add it back because users expect to be able to import files in their test directory. # TODO: move this to gpi_embed.cpp sys.path.insert(0, """") _setup_logging() # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners # If the user doesn't want to see these, they can always change the global # warning settings in their test module. 
if not sys.warnoptions: warnings.simplefilter(""default"") from cocotb import simulator global SIM_NAME, SIM_VERSION SIM_NAME = simulator.get_simulator_product().strip() SIM_VERSION = simulator.get_simulator_version().strip() cocotb.log.info(""Running on {} version {}"".format(SIM_NAME, SIM_VERSION)) memcheck_port = os.getenv('MEMCHECK') if memcheck_port is not None: mem_debug(int(memcheck_port)) log.info(""Running tests with cocotb v%s from %s"" % (__version__, os.path.dirname(__file__))) # Create the base handle type process_plusargs() global scheduler scheduler = Scheduler() # Seed the Python random number generator to make this repeatable global RANDOM_SEED RANDOM_SEED = os.getenv('RANDOM_SEED') if RANDOM_SEED is None: if 'ntb_random_seed' in plusargs: RANDOM_SEED = eval(plusargs['ntb_random_seed']) elif 'seed' in plusargs: RANDOM_SEED = eval(plusargs['seed']) else: RANDOM_SEED = int(time.time()) log.info(""Seeding Python random module with %d"" % (RANDOM_SEED)) else: RANDOM_SEED = int(RANDOM_SEED) log.info(""Seeding Python random module with supplied seed %d"" % (RANDOM_SEED)) random.seed(RANDOM_SEED) simulator.set_random_seed(RANDOM_SEED) # Setup DUT object from cocotb import simulator handle = simulator.get_root_handle(root_name) if not handle: raise RuntimeError(""Can not find root handle ({})"".format(root_name)) global top top = cocotb.handle.SimHandle(handle) # start Regression Manager global regression_manager regression_manager = RegressionManager.from_discovery(top) regression_manager.execute() _rlock.release() return True ","def _initialise_testbench(argv_): """"""Initialize testbench. This function is called after the simulator has elaborated all entities and is ready to run the test. The test must be defined by the environment variables :envvar:`MODULE` and :envvar:`TESTCASE`. The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a comma-separated list of modules to be executed before the first test. """""" _rlock.acquire() if ""COCOTB_LIBRARY_COVERAGE"" in os.environ: import coverage global _library_coverage _library_coverage = coverage.coverage( data_file="".coverage.cocotb"", branch=True, include=[""{}/*"".format(os.path.dirname(__file__))]) _library_coverage.start() global argc, argv argv = argv_ argc = len(argv) root_name = os.getenv(""TOPLEVEL"") if root_name is not None: if root_name == """": root_name = None elif '.' in root_name: # Skip any library component of the toplevel root_name = root_name.split(""."", 1)[1] # sys.path normally includes """" (the current directory), but does not appear to when python is embedded. # Add it back because users expect to be able to import files in their test directory. # TODO: move this to gpi_embed.cpp sys.path.insert(0, """") _setup_logging() # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners # If the user doesn't want to see these, they can always change the global # warning settings in their test module. 
if not sys.warnoptions: warnings.simplefilter(""default"") from cocotb import simulator global SIM_NAME, SIM_VERSION SIM_NAME = simulator.get_simulator_product().strip() SIM_VERSION = simulator.get_simulator_version().strip() cocotb.log.info(""Running on {} version {}"".format(SIM_NAME, SIM_VERSION)) memcheck_port = os.getenv('MEMCHECK') if memcheck_port is not None: mem_debug(int(memcheck_port)) log.info(""Running tests with cocotb v%s from %s"" % (__version__, os.path.dirname(__file__))) # Create the base handle type process_plusargs() global scheduler scheduler = Scheduler() # Seed the Python random number generator to make this repeatable global RANDOM_SEED RANDOM_SEED = os.getenv('RANDOM_SEED') if RANDOM_SEED is None: if 'ntb_random_seed' in plusargs: RANDOM_SEED = eval(plusargs['ntb_random_seed']) elif 'seed' in plusargs: RANDOM_SEED = eval(plusargs['seed']) else: RANDOM_SEED = int(time.time()) log.info(""Seeding Python random module with %d"" % (RANDOM_SEED)) else: RANDOM_SEED = int(RANDOM_SEED) log.info(""Seeding cocotb's Python and GPI parts with supplied seed %d"" % (RANDOM_SEED)) random.seed(RANDOM_SEED) simulator.set_random_seed(RANDOM_SEED) # Setup DUT object from cocotb import simulator handle = simulator.get_root_handle(root_name) if not handle: raise RuntimeError(""Can not find root handle ({})"".format(root_name)) global top top = cocotb.handle.SimHandle(handle) # start Regression Manager global regression_manager regression_manager = RegressionManager.from_discovery(top) regression_manager.execute() _rlock.release() return True " 43041,"def sample_coherent( alpha: list, t: float, Ul: np.ndarray, w: np.ndarray, n_samples: int, loss: float = 0.0, ) -> list: r""""""Generate samples for simulating vibrational quantum dynamics with an input Fock state. 
**Example usage:** >>> alpha = [0, 1.4] >>> t = 10.0 >>> Ul = np.array([[0.707106781, -0.707106781], >>> [0.707106781, 0.707106781]]) >>> w = np.array([3914.92, 3787.59]) >>> n_samples = 5 >>> sample_coherent(alpha, t, Ul, w, n_samples) [[0, 2], [0, 4], [0, 3], [0, 1], [0, 2]] Args: alpha (list): list of displacement parameters t (float): time in femtoseconds Ul (array): normal-to-local transformation matrix w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` n_samples (int): number of samples to be generated loss (float): loss parameter denoting the fraction of lost photons Returns: list[list[int]]: a list of samples """""" if n_samples < 1: raise ValueError(""Number of samples must be at least one"") if t < 0: raise ValueError(""Time must be zero or positive"") if np.any(w <= 0): raise ValueError(""Vibrational frequencies must be larger than zero"") if np.any(np.iscomplex(Ul)): raise ValueError(""The normal mode to local mode transformation matrix must be real"") if not 0 <= loss <= 1: raise ValueError(""Loss parameter must take a value between zero and one"") if not len(alpha) == len(Ul): raise ValueError( ""Number of displacement parameters and the number of modes in the normal-to-local"" "" transformation matrix must be equal"" ) modes = len(Ul) op = evolution(modes) eng = sf.LocalEngine(backend=""gaussian"") prog = sf.Program(modes) # pylint: disable=expression-not-assigned with prog.context as q: for i in range(modes): sf.ops.Dgate(alpha[i], 0) | q[i] op(t, Ul, w) | q if loss: for _q in q: sf.ops.LossChannel(1 - loss) | _q sf.ops.MeasureFock() | q with warnings.catch_warnings(): warnings.filterwarnings(""ignore"", category=UserWarning, message=""Cannot simulate non-"") s = eng.run(prog, shots=n_samples).samples return s.tolist() ","def sample_coherent( alpha: list, t: float, Ul: np.ndarray, w: np.ndarray, n_samples: int, loss: float = 0.0, ) -> list: r""""""Generate samples for simulating vibrational quantum dynamics with an input coherent state. 
**Example usage:** >>> alpha = [0, 1.4] >>> t = 10.0 >>> Ul = np.array([[0.707106781, -0.707106781], >>> [0.707106781, 0.707106781]]) >>> w = np.array([3914.92, 3787.59]) >>> n_samples = 5 >>> sample_coherent(alpha, t, Ul, w, n_samples) [[0, 2], [0, 4], [0, 3], [0, 1], [0, 2]] Args: alpha (list): list of displacement parameters t (float): time in femtoseconds Ul (array): normal-to-local transformation matrix w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` n_samples (int): number of samples to be generated loss (float): loss parameter denoting the fraction of lost photons Returns: list[list[int]]: a list of samples """""" if n_samples < 1: raise ValueError(""Number of samples must be at least one"") if t < 0: raise ValueError(""Time must be zero or positive"") if np.any(w <= 0): raise ValueError(""Vibrational frequencies must be larger than zero"") if np.any(np.iscomplex(Ul)): raise ValueError(""The normal mode to local mode transformation matrix must be real"") if not 0 <= loss <= 1: raise ValueError(""Loss parameter must take a value between zero and one"") if not len(alpha) == len(Ul): raise ValueError( ""Number of displacement parameters and the number of modes in the normal-to-local"" "" transformation matrix must be equal"" ) modes = len(Ul) op = evolution(modes) eng = sf.LocalEngine(backend=""gaussian"") prog = sf.Program(modes) # pylint: disable=expression-not-assigned with prog.context as q: for i in range(modes): sf.ops.Dgate(alpha[i], 0) | q[i] op(t, Ul, w) | q if loss: for _q in q: sf.ops.LossChannel(1 - loss) | _q sf.ops.MeasureFock() | q with warnings.catch_warnings(): warnings.filterwarnings(""ignore"", category=UserWarning, message=""Cannot simulate non-"") s = eng.run(prog, shots=n_samples).samples return s.tolist() " 35232,"def asarray_chkfinite(a, dtype=None, order=None): """""" Convert the given input to an array, and raises an error if the input is NaNs or Infs. Args: a: array like. dtype: data type, optional order: {'C', 'F'}, optional Returns: out (cupy.ndarray): Array interpretation of `a`. .. seealso:: :func:`numpy.asarray_chkfinite` """""" a = cupy.asarray(a, dtype=dtype, order=order) if not cupy.isfinite(a).all(): raise ValueError( ""Array must not contain infs or nans"") return a ","def asarray_chkfinite(a, dtype=None, order=None): """""" Convert the given input to an array, and raises an error if the input is NaNs or Infs. Args: a: array like. dtype: data type, optional order: {'C', 'F'}, optional Returns: cupy.ndarray: Array interpretation of `a`. .. 
seealso:: :func:`numpy.asarray_chkfinite` """""" a = cupy.asarray(a, dtype=dtype, order=order) if not cupy.isfinite(a).all(): raise ValueError( ""Array must not contain infs or nans"") return a " 9198,"def requestMock( path: bytes, method: bytes = b""GET"", host: bytes = b""localhost"", port: int = 8080, isSecure: bool = False, body: Optional[bytes] = None, headers: Optional[Mapping[bytes, Sequence[bytes]]] = None, ) -> IRequest: if not headers: headers = {} if not body: body = b"""" path, qpath = (path.split(b""?"", 1) + [b""""])[:2] request = server.Request(DummyChannel(), False) request.site = Mock(server.Site) request.gotLength(len(body)) request.content = BytesIO() request.content.write(body) request.content.seek(0) request.args = parse_qs(qpath) request.requestHeaders = Headers(headers) request.setHost(host, port, isSecure) request.uri = path request.prepath = [] request.postpath = path.split(b""/"")[1:] request.method = method request.clientproto = b""HTTP/1.1"" request.setHeader = Mock(wraps=request.setHeader) request.setResponseCode = Mock(wraps=request.setResponseCode) request._written = BytesIO() request.finishCount = 0 request.writeCount = 0 def registerProducer(producer, streaming): request.producer = producer for _ in range(2): if request.producer: request.producer.resumeProducing() def unregisterProducer(): request.producer = None def finish(): request.finishCount += 1 if not request.startedWriting: request.write(b"""") if not request.finished: request.finished = True request._cleanup() def write(data): request.writeCount += 1 request.startedWriting = True if not request.finished: request._written.write(data) else: raise RuntimeError( ""Request.write called on a request after "" ""Request.finish was called."" ) def getWrittenData(): return request._written.getvalue() request.finish = finish request.write = write request.getWrittenData = getWrittenData request.registerProducer = registerProducer request.unregisterProducer = unregisterProducer request.processingFailed = Mock(wraps=request.processingFailed) return request ","def requestMock( path: bytes, method: bytes = b""GET"", host: bytes = b""localhost"", port: int = 8080, isSecure: bool = False, body: Optional[bytes] = None, headers: Mapping[bytes, Sequence[bytes]] = {}, ) -> IRequest: if not headers: headers = {} if not body: body = b"""" path, qpath = (path.split(b""?"", 1) + [b""""])[:2] request = server.Request(DummyChannel(), False) request.site = Mock(server.Site) request.gotLength(len(body)) request.content = BytesIO() request.content.write(body) request.content.seek(0) request.args = parse_qs(qpath) request.requestHeaders = Headers(headers) request.setHost(host, port, isSecure) request.uri = path request.prepath = [] request.postpath = path.split(b""/"")[1:] request.method = method request.clientproto = b""HTTP/1.1"" request.setHeader = Mock(wraps=request.setHeader) request.setResponseCode = Mock(wraps=request.setResponseCode) request._written = BytesIO() request.finishCount = 0 request.writeCount = 0 def registerProducer(producer, streaming): request.producer = producer for _ in range(2): if request.producer: request.producer.resumeProducing() def unregisterProducer(): request.producer = None def finish(): request.finishCount += 1 if not request.startedWriting: request.write(b"""") if not request.finished: request.finished = True request._cleanup() def write(data): request.writeCount += 1 request.startedWriting = True if not request.finished: request._written.write(data) else: raise RuntimeError( ""Request.write 
called on a request after "" ""Request.finish was called."" ) def getWrittenData(): return request._written.getvalue() request.finish = finish request.write = write request.getWrittenData = getWrittenData request.registerProducer = registerProducer request.unregisterProducer = unregisterProducer request.processingFailed = Mock(wraps=request.processingFailed) return request " 5412,"def test_rename(): """""" Test if the source file exists on the system, rename it to the named file. """""" name = ""/tmp/salt"" source = ""/tmp/salt/salt"" ret = {""name"": name, ""result"": False, ""comment"": """", ""changes"": {}} comt = ""Must provide name to file.rename"" ret.update({""comment"": comt, ""name"": """"}) assert filestate.rename("""", source) == ret mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) mock_lex = MagicMock(side_effect=[False, True, True]) with patch.object(os.path, ""isabs"", mock_f): comt = ""Specified file {} is not an absolute path"".format(name) ret.update({""comment"": comt, ""name"": name}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(return_value=False) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): comt = 'Source file ""{}"" has already been moved out of ' ""place"".format( source ) ret.update({""comment"": comt, ""result"": True}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, True, True]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): comt = 'The target file ""{}"" exists and will not be ' ""overwritten"".format( name ) ret.update({""comment"": comt, ""result"": True}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, True, True]) mock_rem = MagicMock(side_effect=IOError) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.dict(filestate.__opts__, {""test"": False}): comt = 'Failed to delete ""{}"" in preparation for ' ""forced move"".format( name ) with patch.dict(filestate.__salt__, {""file.remove"": mock_rem}): ret.update({""name"": name, ""comment"": comt, ""result"": False}) assert filestate.rename(name, source, force=True) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.dict(filestate.__opts__, {""test"": True}): comt = 'File ""{}"" is set to be moved to ""{}""'.format(source, name) ret.update({""name"": name, ""comment"": comt, ""result"": None}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.object(os.path, ""isdir"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): comt = ""The target directory /tmp is not present"" ret.update({""name"": name, ""comment"": comt, ""result"": False}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.object(os.path, ""isdir"", mock_t): with patch.object(os.path, ""islink"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.object( shutil, ""move"", MagicMock(side_effect=IOError) ): comt = 'Failed to move ""{}"" to ""{}""'.format(source, name) ret.update({""name"": name, 
""comment"": comt, ""result"": False}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.object(os.path, ""isdir"", mock_t): with patch.object(os.path, ""islink"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.object(shutil, ""move"", MagicMock()): comt = 'Moved ""{}"" to ""{}""'.format(source, name) ret.update( { ""name"": name, ""comment"": comt, ""result"": True, ""changes"": {name: source}, } ) assert filestate.rename(name, source) == ret ","def test_rename(): """""" Test if the source file exists on the system, rename it to the named file. """""" name = ""/tmp/salt"" source = ""/tmp/salt/salt"" ret = {""name"": name, ""result"": False, ""comment"": """", ""changes"": {}} comt = ""Must provide name to file.rename"" ret.update({""comment"": comt, ""name"": """"}) assert filestate.rename("""", source) == ret mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) mock_lex = MagicMock(side_effect=[False, True, True]) with patch.object(os.path, ""isabs"", mock_f): comt = ""Specified file {} is not an absolute path"".format(name) ret.update({""comment"": comt, ""name"": name}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(return_value=False) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): comt = 'Source file ""{}"" has already been moved out of ' ""place"".format( source ) ret.update({""comment"": comt, ""result"": True}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, True, True]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): comt = 'The target file ""{}"" exists and will not be overwritten'.format( name ) ret.update({""comment"": comt, ""result"": True}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, True, True]) mock_rem = MagicMock(side_effect=IOError) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.dict(filestate.__opts__, {""test"": False}): comt = 'Failed to delete ""{}"" in preparation for ' ""forced move"".format( name ) with patch.dict(filestate.__salt__, {""file.remove"": mock_rem}): ret.update({""name"": name, ""comment"": comt, ""result"": False}) assert filestate.rename(name, source, force=True) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.dict(filestate.__opts__, {""test"": True}): comt = 'File ""{}"" is set to be moved to ""{}""'.format(source, name) ret.update({""name"": name, ""comment"": comt, ""result"": None}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.object(os.path, ""isdir"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): comt = ""The target directory /tmp is not present"" ret.update({""name"": name, ""comment"": comt, ""result"": False}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.object(os.path, ""isdir"", mock_t): with 
patch.object(os.path, ""islink"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.object( shutil, ""move"", MagicMock(side_effect=IOError) ): comt = 'Failed to move ""{}"" to ""{}""'.format(source, name) ret.update({""name"": name, ""comment"": comt, ""result"": False}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(side_effect=[True, False, False]) with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""lexists"", mock_lex): with patch.object(os.path, ""isdir"", mock_t): with patch.object(os.path, ""islink"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.object(shutil, ""move"", MagicMock()): comt = 'Moved ""{}"" to ""{}""'.format(source, name) ret.update( { ""name"": name, ""comment"": comt, ""result"": True, ""changes"": {name: source}, } ) assert filestate.rename(name, source) == ret " 30662,"def file_command(client, args): files = argToList(args.get('file')) all_results = [] for file in files: hash_type = get_hash_type(file) if hash_type != ""Unknown"": res = client.get_hash_reputation(hash_type, file) analysis_info = { hash_type.upper(): file, 'Found': res.get('found'), 'Verdict': res.get('verdict'), 'Score': res.get('score'), 'Malware-families': res.get('malware_families') } score = Common.DBotScore.NONE malicious = None if res[""found""]: if res[""verdict""]: score = Common.DBotScore.BAD malicious = ""TDS Polygon score: {}"".format(res['score']) if res.get('malware_families'): malicious += "", {}"".format("", "".join(res[""malware_families""])) else: score = Common.DBotScore.GOOD dbot_score = Common.DBotScore( indicator=file, indicator_type=DBotScoreType.FILE, integration_name=INTEGRATION_NAME, score=score, malicious_description=malicious ) indicator = Common.File(**{hash_type: file, ""dbot_score"": dbot_score}) result = CommandResults( outputs_prefix=""Polygon.Analysis"", outputs_key_field=hash_type.upper(), outputs=analysis_info, indicators=[indicator] ) return_results(result) all_results.append(result) return all_results ","def file_command(client, args): files = argToList(args.get('file')) all_results = [] for file in files: hash_type = get_hash_type(file) if hash_type != ""Unknown"": res = client.get_hash_reputation(hash_type, file) analysis_info = { hash_type.upper(): file, 'Found': res.get('found'), 'Verdict': res.get('verdict'), 'Score': res.get('score'), 'Malware-families': res.get('malware_families') } score = Common.DBotScore.NONE malicious = None if res[""found""]: if res.get(""verdict""): score = Common.DBotScore.BAD malicious = ""TDS Polygon score: {}"".format(res['score']) if res.get('malware_families'): malicious += "", {}"".format("", "".join(res[""malware_families""])) else: score = Common.DBotScore.GOOD dbot_score = Common.DBotScore( indicator=file, indicator_type=DBotScoreType.FILE, integration_name=INTEGRATION_NAME, score=score, malicious_description=malicious ) indicator = Common.File(**{hash_type: file, ""dbot_score"": dbot_score}) result = CommandResults( outputs_prefix=""Polygon.Analysis"", outputs_key_field=hash_type.upper(), outputs=analysis_info, indicators=[indicator] ) return_results(result) all_results.append(result) return all_results " 3085,"def test_apply_function_adds_a_row(): # GH: 33058 df = pd.DataFrame( {""col1"": [""A"", ""A"", ""A"", ""B"", ""B"", ""B""], ""col2"": [1, 2, 3, 4, 5, 6]} ) def fn(x): x.col2[x.index[-1]] = 0 return x.col2 result = df.groupby([""col1""], as_index=False).apply(fn) expected = pd.Series( [1, 2, 0, 4, 5, 0], 
index=pd.MultiIndex.from_tuples( [(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)] ), name=""col2"", ) tm.assert_series_equal(result, expected) ","def test_apply_function_with_indexing(): # GH: 33058 df = pd.DataFrame( {""col1"": [""A"", ""A"", ""A"", ""B"", ""B"", ""B""], ""col2"": [1, 2, 3, 4, 5, 6]} ) def fn(x): x.col2[x.index[-1]] = 0 return x.col2 result = df.groupby([""col1""], as_index=False).apply(fn) expected = pd.Series( [1, 2, 0, 4, 5, 0], index=pd.MultiIndex.from_tuples( [(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)] ), name=""col2"", ) tm.assert_series_equal(result, expected) " 44230,"def quantum_fisher(qnode, *args, **kwargs): r""""""Returns a function that computes the quantum fisher information matrix (QFIM) of a given :class:`.QNode`. Given a parametrized quantum state :math:`|\psi(\bm{\theta})\rangle`, the quantum fisher information matrix (QFIM) quantifies how changes to the parameters :math:`\bm{\theta}` are reflected in the quantum state. The metric used to induce the QFIM is the fidelity :math:`f = |\langle \psi | \psi' \rangle|^2` between two (pure) quantum states. This leads to the following definition of the QFIM (see eq. (27) in `arxiv:2103.15191 `_): .. math:: \text{QFIM}_{i, j} = 4 \text{Re}\left[ \langle \partial_i \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle - \langle \partial_i \psi(\bm{\theta}) | \psi(\bm{\theta}) \rangle \langle \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle \right] with short notation :math:`| \partial_j \psi(\bm{\theta}) \rangle := \frac{\partial}{\partial \theta_j}| \psi(\bm{\theta}) \rangle`. .. seealso:: :func:`~.pennylane.metric_tensor`, :func:`~.pennylane.adjoint_metric_tensor`, :func:`~.pennylane.qinfo.transforms.classical_fisher` Args: qnode (:class:`.QNode`): A :class:`.QNode` that may have arbitrary return types. args: In case finite shots are used, further arguments according to :func:`~.pennylane.metric_tensor` may be passed. Returns: func: The function that computes the quantum fisher information matrix. .. note:: ``quantum_fisher`` coincides with the ``metric_tensor`` with a prefactor of :math:`4`. In case a device with finite shots is used, the hardware compatible transform :func:`~.pennylane.metric_tensor` is used. In case of a device with ``shots=None``, :func:`~.pennylane.adjoint_metric_tensor` is used. Please refer to their respective documentations for details on the arguments. **Example** The quantum Fisher information matrix (QIFM) can be used to compute the `natural` gradient for `Quantum Natural Gradient Descent `_. A typical scenario is optimizing the expectation value of a Hamiltonian: .. code-block:: python n_wires = 2 dev = qml.device(""default.qubit"", wires=n_wires) H = 1.*qml.PauliX(0) @ qml.PauliX(1) - 0.5 * qml.PauliZ(1) @qml.qnode(dev) def circ(params): qml.RY(params[0], wires=1) qml.CNOT(wires=(1,0)) qml.RY(params[1], wires=1) qml.RZ(params[2], wires=1) return qml.expval(H) params = pnp.array([0.5, 1., 0.2], requires_grad=True) The natural gradient is then simply the QFIM multiplied by the gradient: >>> grad = qml.grad(circ)(params) [ 0.59422561, -0.02615095, -0.05146226] >>> qfim = qml.qinfo.quantum_fisher(circ)(params) np.diag([1., 1., 0.77517241]) >>> q_nat_grad = qfim @ grad [ 0.59422561 -0.02615095 -0.03989212] When using real hardware or finite shots, ``quantum_fisher`` is internally calling :func:`~.pennylane.metric_tensor`. To obtain the full QFIM, we need an auxilary wire to perform the Hadamard test. 
>>> dev = qml.device(""default.qubit"", wires=n_wires+1, shots=1000) >>> @qml.qnode(dev) ... def circ(params): ... qml.RY(params[0], wires=1) ... qml.CNOT(wires=(1,0)) ... qml.RY(params[1], wires=1) ... qml.RZ(params[2], wires=1) ... return qml.expval(H) >>> qfim = qml.qinfo.quantum_fisher(circ)(params) Alternatively, we can fall back on the block-diagonal QFIM without the additional wire. >>> qfim = qml.qinfo.quantum_fisher(circ, approx=""block-diag"")(params) """""" if qnode.device.shots is not None: def wrapper(*args0, **kwargs0): return 4 * metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0) else: def wrapper(*args0, **kwargs0): return 4 * adjoint_metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0) return wrapper ","def quantum_fisher(qnode, *args, **kwargs): r""""""Returns a function that computes the quantum fisher information matrix (QFIM) of a given :class:`.QNode`. Given a parametrized quantum state :math:`|\psi(\bm{\theta})\rangle`, the quantum fisher information matrix (QFIM) quantifies how changes to the parameters :math:`\bm{\theta}` are reflected in the quantum state. The metric used to induce the QFIM is the fidelity :math:`f = |\langle \psi | \psi' \rangle|^2` between two (pure) quantum states. This leads to the following definition of the QFIM (see eq. (27) in `arxiv:2103.15191 `_): .. math:: \text{QFIM}_{i, j} = 4 \text{Re}\left[ \langle \partial_i \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle - \langle \partial_i \psi(\bm{\theta}) | \psi(\bm{\theta}) \rangle \langle \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle \right] with short notation :math:`| \partial_j \psi(\bm{\theta}) \rangle := \frac{\partial}{\partial \theta_j}| \psi(\bm{\theta}) \rangle`. .. seealso:: :func:`~.pennylane.metric_tensor`, :func:`~.pennylane.adjoint_metric_tensor`, :func:`~.pennylane.qinfo.transforms.classical_fisher` Args: qnode (:class:`.QNode`): A :class:`.QNode` that may have arbitrary return types. args: In case finite shots are used, further arguments according to :func:`~.pennylane.metric_tensor` may be passed. Returns: func: The function that computes the quantum fisher information matrix. .. note:: ``quantum_fisher`` coincides with the ``metric_tensor`` with a prefactor of :math:`4`. In case a device with finite shots is used, the hardware compatible transform :func:`~.pennylane.metric_tensor` is used. In case of a device with ``shots=None``, :func:`~.pennylane.adjoint_metric_tensor` is used. Please refer to their respective documentations for details on the arguments. **Example** The quantum Fisher information matrix (QIFM) can be used to compute the `natural` gradient for `Quantum Natural Gradient Descent `_. A typical scenario is optimizing the expectation value of a Hamiltonian: .. code-block:: python n_wires = 2 dev = qml.device(""default.qubit"", wires=n_wires) H = 1.*qml.PauliX(0) @ qml.PauliX(1) - 0.5 * qml.PauliZ(1) @qml.qnode(dev) def circ(params): qml.RY(params[0], wires=1) qml.CNOT(wires=(1,0)) qml.RY(params[1], wires=1) qml.RZ(params[2], wires=1) return qml.expval(H) params = pnp.array([0.5, 1., 0.2], requires_grad=True) The natural gradient is then simply the QFIM multiplied by the gradient: >>> grad = qml.grad(circ)(params) [ 0.59422561, -0.02615095, -0.05146226] >>> qfim = qml.qinfo.quantum_fisher(circ)(params) np.diag([1., 1., 0.77517241]) >>> q_nat_grad = qfim @ grad [ 0.59422561 -0.02615095 -0.03989212] When using real hardware or finite shots, ``quantum_fisher`` is internally calling :func:`~.pennylane.metric_tensor`. 
To obtain the full QFIM, we need an auxilary wire to perform the Hadamard test. >>> dev = qml.device(""default.qubit"", wires=n_wires+1, shots=1000) >>> @qml.qnode(dev) ... def circ(params): ... qml.RY(params[0], wires=1) ... qml.CNOT(wires=(1,0)) ... qml.RY(params[1], wires=1) ... qml.RZ(params[2], wires=1) ... return qml.expval(H) >>> qfim = qml.qinfo.quantum_fisher(circ)(params) Alternatively, we can fall back on the block-diagonal QFIM without the additional wire. >>> qfim = qml.qinfo.quantum_fisher(circ, approx=""block-diag"")(params) """""" if qnode.device.shots is not None and isinstance(qnode.device, DefaultQubit): def wrapper(*args0, **kwargs0): return 4 * metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0) else: def wrapper(*args0, **kwargs0): return 4 * adjoint_metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0) return wrapper " 57893,"def main() -> None: verify_certificate = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) # How much time before the first fetch to retrieve incidents first_fetch_timestamp = get_first_time_fetch(demisto.params().get('first_fetch')) demisto.debug(f'Command being called is {demisto.command()}') try: # Initialize Client client = Client(base_url=BASE_URL, verify=verify_certificate, headers={}, proxy=proxy) # Run the requested command if demisto.command() == 'test-module': return_results(test_module(client, first_fetch_timestamp)) elif demisto.command() == 'fetch-incidents': # Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH max_results = get_max_fetch(demisto.params().get('max_fetch')) next_run, incidents = fetch_incidents( client=client, max_results=max_results, last_run=demisto.getLastRun(), first_fetch_time=first_fetch_timestamp, incident_types=demisto.params().get('incident_types', DEFAULT_INCIDENT_TYPES) ) # Set last run and create incidents in XSOAR demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'radark-incident-get-items': return_results(incident_get_items_command(client, demisto.args())) elif demisto.command() == 'radark-email-enrich': return_results(email_enrich_command(client, demisto.args())) elif demisto.command() == 'radark-item-handle': return_results(item_handle_command(client, demisto.args())) elif demisto.command() == 'radark-item-purchase': return_results(item_purchase_command(client, demisto.args())) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: params = demisto.params() verify_certificate = not params.get('insecure', False) proxy = demisto.params().get('proxy', False) # How much time before the first fetch to retrieve incidents first_fetch_timestamp = get_first_time_fetch(demisto.params().get('first_fetch')) demisto.debug(f'Command being called is {demisto.command()}') try: # Initialize Client client = Client(base_url=BASE_URL, verify=verify_certificate, headers={}, proxy=proxy) # Run the requested command if demisto.command() == 'test-module': return_results(test_module(client, first_fetch_timestamp)) elif demisto.command() == 'fetch-incidents': # Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH max_results = get_max_fetch(demisto.params().get('max_fetch')) next_run, incidents = fetch_incidents( client=client, max_results=max_results, last_run=demisto.getLastRun(), 
first_fetch_time=first_fetch_timestamp, incident_types=demisto.params().get('incident_types', DEFAULT_INCIDENT_TYPES) ) # Set last run and create incidents in XSOAR demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'radark-incident-get-items': return_results(incident_get_items_command(client, demisto.args())) elif demisto.command() == 'radark-email-enrich': return_results(email_enrich_command(client, demisto.args())) elif demisto.command() == 'radark-item-handle': return_results(item_handle_command(client, demisto.args())) elif demisto.command() == 'radark-item-purchase': return_results(item_purchase_command(client, demisto.args())) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 46755,"def get_relpath(src_path): """""" Retrieve relative path to the source root from the perspective of a Markdown file Examples -------- >>> get_relpath(""02-common-principles.md"") '.' >>> get_relpath(""04-modality-specific-files/01-magnetic-resonance-imaging-data.md"") '..' >>> get_relpath(""we/lack/third_levels.md"") '../..' """""" return posixpath.relpath(""."", posixpath.dirname(src_path)) ","def get_relpath(src_path): """"""Retrieve relative path to the source root from the perspective of a Markdown file. Examples -------- >>> get_relpath(""02-common-principles.md"") '.' >>> get_relpath(""04-modality-specific-files/01-magnetic-resonance-imaging-data.md"") '..' >>> get_relpath(""we/lack/third_levels.md"") '../..' """""" return posixpath.relpath(""."", posixpath.dirname(src_path)) " 31921,"def download_malware_sample(sha256, api_url, use_ssl): return http_request('GET', f'download/{sha256}', api_url=api_url, use_ssl=use_ssl, ) ","def download_malware_sample(sha256, api_url, use_ssl): return http_request('GET', f'download/{sha256}', api_url=api_url, use_ssl=use_ssl) " 53861,"def load_arguments(self, _): from argcomplete.completers import FilesCompleter from argcomplete.completers import DirectoriesCompleter from azure.mgmt.resource.locks.models import LockLevel from azure.mgmt.resource.managedapplications.models import ApplicationLockLevel from azure.mgmt.resource.policy.models import (ExemptionCategory, EnforcementMode) from azure.cli.core.commands.validators import get_default_location_from_resource_group from azure.cli.core.api import get_subscription_id_list from azure.cli.core.commands.parameters import ( resource_group_name_type, get_location_type, tag_type, tags_type, get_resource_group_completion_list, no_wait_type, file_type, get_enum_type, get_three_state_flag) from azure.cli.core.profiles import ResourceType from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL from knack.arguments import ignore_type, CLIArgumentType from azure.cli.command_modules.resource._completers import ( get_policy_completion_list, get_policy_set_completion_list, get_policy_assignment_completion_list, get_policy_exemption_completion_list, get_resource_types_completion_list, get_providers_completion_list) from azure.cli.command_modules.resource._validators import ( validate_lock_parameters, validate_resource_lock, validate_group_lock, validate_subscription_lock, validate_metadata, RollbackAction) from azure.cli.command_modules.resource.parameters import TagUpdateOperation DeploymentMode, WhatIfResultFormat, ChangeType = self.get_models('DeploymentMode', 'WhatIfResultFormat', 'ChangeType') # BASIC PARAMETER 
CONFIGURATION resource_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The resource name. (Ex: myC)') resource_type_type = CLIArgumentType(help=""The resource type (Ex: 'resC'). Can also accept namespace/type format (Ex: 'Microsoft.Provider/resC')"") resource_namespace_type = CLIArgumentType(options_list='--namespace', completer=get_providers_completion_list, help=""Provider namespace (Ex: 'Microsoft.Provider')"") resource_parent_type = CLIArgumentType(required=False, options_list=['--parent'], help=""The parent path (Ex: 'resA/myA/resB/myB')"") existing_policy_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_completion_list, help='The policy definition name.') existing_policy_set_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_set_completion_list, help='The policy set definition name.') subscription_type = CLIArgumentType(options_list='--subscription', FilesCompleter=get_subscription_id_list, help='The subscription id of the policy [set] definition.') management_group_name_type = CLIArgumentType(options_list='--management-group', help='The name of the management group of the policy [set] definition. This parameter is required if your policy set is scoped to a management group.') identity_scope_type = CLIArgumentType(help=""Scope that the system assigned identity can access"") identity_role_type = CLIArgumentType(options_list=['--role'], help=""Role name or id that will be assigned to the managed identity"") extended_json_format_type = CLIArgumentType(options_list=['--handle-extended-json-format', '-j'], action='store_true', help='Support to handle extended template content including multiline and comments in deployment') deployment_name_type = CLIArgumentType(options_list=['--name', '-n'], required=True, help='The deployment name.') deployment_create_name_type = CLIArgumentType(options_list=['--name', '-n'], required=False, help='The deployment name. Default to template file base name') management_group_id_type = CLIArgumentType(options_list=['--management-group-id', '-m'], required=True, help='The management group id.') deployment_template_file_type = CLIArgumentType(options_list=['--template-file', '-f'], completer=FilesCompleter(), type=file_type, help=""a path to a template file or Bicep file in the file system"") deployment_template_uri_type = CLIArgumentType(options_list=['--template-uri', '-u'], help='a uri to a remote template file') deployment_template_spec_type = CLIArgumentType(options_list=['--template-spec', '-s'], min_api='2019-06-01', help=""The template spec resource id."") deployment_query_string_type = CLIArgumentType(options_list=['--query-string', '-q'], help=""The query string (a SAS token) to be used with the template-uri in the case of linked templates."") deployment_parameters_type = CLIArgumentType(options_list=['--parameters', '-p'], action='append', nargs='+', completer=FilesCompleter(), help='the deployment parameters') filter_type = CLIArgumentType(options_list=['--filter'], is_preview=True, help='Filter expression using OData notation. You can use --filter ""provisioningState eq \'{state}\'"" to filter provisioningState. ' 'To get more information, please visit https://docs.microsoft.com/rest/api/resources/deployments/listatsubscriptionscope#uri-parameters') no_prompt = CLIArgumentType(arg_type=get_three_state_flag(), help='The option to disable the prompt of missing parameters for ARM template. 
' 'When the value is true, the prompt requiring users to provide missing parameter will be ignored. The default value is false.') deployment_what_if_type = CLIArgumentType(options_list=['--what-if', '-w'], action='store_true', help='Instruct the command to run deployment What-If.', min_api='2019-07-01') deployment_what_if_proceed_if_no_change_type = CLIArgumentType(options_list=['--proceed-if-no-change'], action='store_true', help='Instruct the command to execute the deployment if the What-If result contains no resource changes. Applicable when --confirm-with-what-if is set.', min_api='2019-07-01') deployment_what_if_result_format_type = CLIArgumentType(options_list=['--result-format', '-r'], arg_type=get_enum_type(WhatIfResultFormat, ""FullResourcePayloads""), min_api='2019-07-01') deployment_what_if_no_pretty_print_type = CLIArgumentType(options_list=['--no-pretty-print'], action='store_true', help='Disable pretty-print for What-If results. When set, the output format type will be used.') deployment_what_if_confirmation_type = CLIArgumentType(options_list=['--confirm-with-what-if', '-c'], action='store_true', help='Instruct the command to run deployment What-If before executing the deployment. It then prompts you to acknowledge resource changes before it continues.', min_api='2019-07-01') deployment_what_if_exclude_change_types_type = CLIArgumentType(nargs=""+"", options_list=['--exclude-change-types', '-x'], arg_type=get_enum_type(ChangeType), help='Space-separated list of resource change types to be excluded from What-If results.', min_api='2019-07-01') tag_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The tag name.') tag_value_type = CLIArgumentType(options_list='--value', help='The tag value.') tag_resource_id_type = CLIArgumentType(options_list='--resource-id', help='The resource identifier for the tagged entity. A resource, a resource group or a subscription may be tagged.', min_api='2019-10-01') latest_include_preview_type = CLIArgumentType(options_list=['--latest-include-preview', '-v'], is_preview=True, action='store_true', arg_group='Resource Id', help='Indicate that the latest api-version will be used regardless of whether it is preview version (like 2020-01-01-preview) or not. ' 'For example, if the supported api-version of resource provider is 2020-01-01-preview and 2019-01-01: ' 'when passing in this parameter it will take the latest version 2020-01-01-preview, otherwise it will take the latest stable version 2019-01-01 without passing in this parameter') ts_display_name_type = CLIArgumentType(options_list=['--display-name', '-d'], help='The display name of the template spec') ts_description_type = CLIArgumentType(options_list=['--description'], help='The description of the parent template spec.') ts_version_description_type = CLIArgumentType(options_list=['--version-description'], help='The description of the template spec version.') ui_form_definition_file_type = CLIArgumentType(options_list=['--ui-form-definition'], completer=FilesCompleter(), type=file_type, help=""A path to a uiFormDefinition file in the file system"") bicep_target_platform_type = CLIArgumentType(options_list=['--target-platform', '-t'], arg_type=get_enum_type( [""win-x64"", ""linux-musl-x64"", ""linux-x64"", ""osx-x64""]), help=""The platform the Bicep CLI will be running on. 
Set this to skip automatic platform detection if it does not work properly."") _PROVIDER_HELP_TEXT = 'the resource namespace, aka \'provider\'' with self.argument_context('resource') as c: c.argument('no_wait', no_wait_type) c.argument('resource_group_name', resource_group_name_type, arg_group='Resource Id') c.ignore('resource_id') c.argument('resource_name', resource_name_type, arg_group='Resource Id') c.argument('api_version', help='The api version of the resource (omit for the latest stable version)', required=False, arg_group='Resource Id') c.argument('resource_provider_namespace', resource_namespace_type, arg_group='Resource Id') c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list, arg_group='Resource Id') c.argument('parent_resource_path', resource_parent_type, arg_group='Resource Id') c.argument('tag', tag_type) c.argument('tags', tags_type) c.argument('resource_ids', nargs='+', options_list=['--ids'], help='One or more resource IDs (space-delimited). If provided, no other ""Resource Id"" arguments should be specified.', arg_group='Resource Id') c.argument('include_response_body', arg_type=get_three_state_flag(), help='Use if the default command output doesn\'t capture all of the property data.') c.argument('latest_include_preview', latest_include_preview_type) with self.argument_context('resource list') as c: c.argument('name', resource_name_type) with self.argument_context('resource move') as c: c.argument('ids', nargs='+') with self.argument_context('resource invoke-action') as c: c.argument('action', help='The action that will be invoked on the specified resource') c.argument('request_body', help='JSON encoded parameter arguments for the action that will be passed along in the post request body. Use @{file} to load from a file.') with self.argument_context('resource create') as c: c.argument('resource_id', options_list=['--id'], help='Resource ID.', action=None) c.argument('properties', options_list=['--properties', '-p'], help='a JSON-formatted string containing resource properties') c.argument('is_full_object', action='store_true', help='Indicate that the properties object includes other options such as location, tags, sku, and/or plan.') with self.argument_context('resource link') as c: c.argument('target_id', options_list=['--target', c.deprecate(target='--target-id', redirect='--target', hide=True)], help='Fully-qualified resource ID of the resource link target.') c.argument('link_id', options_list=['--link', c.deprecate(target='--link-id', redirect='--link', hide=True)], help='Fully-qualified resource ID of the resource link.') c.argument('notes', help='Notes for the link.') c.argument('scope', help='Fully-qualified scope for retrieving links.') c.argument('filter_string', options_list=['--filter', c.deprecate(target='--filter-string', redirect='--filter', hide=True)], help='Filter string for limiting results.') with self.argument_context('resource tag') as c: c.argument('is_incremental', action='store_true', options_list=['--is-incremental', '-i'], help='The option to add tags incrementally without deleting the original tags. 
If the key of new tag and original tag are duplicated, the original value will be overwritten.') with self.argument_context('resource wait') as c: c.ignore('latest_include_preview') with self.argument_context('provider') as c: c.ignore('top') c.argument('resource_provider_namespace', options_list=['--namespace', '-n'], completer=get_providers_completion_list, help=_PROVIDER_HELP_TEXT) with self.argument_context('provider register') as c: c.argument('mg', help=""The management group id to register."", options_list=['--management-group-id', '-m']) c.argument('accept_terms', action='store_true', is_preview=True, help=""Accept market place terms and RP terms for RPaaS. Required when registering RPs from RPaaS, such as 'Microsoft.Confluent' and 'Microsoft.Datadog'."", deprecate_info=c.deprecate(hide=True)) c.argument('wait', action='store_true', help='wait for the registration to finish') c.argument('consent_to_permissions', options_list=['--consent-to-permissions', '-c'], action='store_true', help='A value indicating whether authorization is consented or not.') with self.argument_context('provider unregister') as c: c.argument('wait', action='store_true', help='wait for unregistration to finish') with self.argument_context('provider operation') as c: c.argument('api_version', help=""The api version of the 'Microsoft.Authorization/providerOperations' resource (omit for the latest stable version)"") with self.argument_context('feature') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT) c.argument('feature_name', options_list=['--name', '-n'], help='the feature name') with self.argument_context('feature list') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT) with self.argument_context('feature registration') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT) c.argument('feature_name', options_list=['--name', '-n'], help='the feature name') with self.argument_context('feature registration list') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT) with self.argument_context('policy') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group where the policy will be applied') with self.argument_context('policy definition', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('policy_definition_name', arg_type=existing_policy_definition_name_type) c.argument('rules', help='JSON formatted string or a path to a file with such content', type=file_type, completer=FilesCompleter()) c.argument('display_name', help='Display name of policy definition.') c.argument('description', help='Description of policy definition.') c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01') c.argument('metadata', min_api='2017-06-01-preview', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.') c.argument('management_group', arg_type=management_group_name_type) c.argument('mode', options_list=['--mode', '-m'], help='Mode of the policy definition, e.g. All, Indexed. 
Please visit https://aka.ms/azure-policy-mode for more information.', min_api='2016-12-01') c.argument('subscription', arg_type=subscription_type) c.ignore('_subscription') # disable global subscription with self.argument_context('policy definition create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy definition.') with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], completer=get_policy_assignment_completion_list, help='Name of the policy assignment.') c.argument('scope', help='Scope to which this policy assignment applies.') c.argument('disable_scope_strict_match', action='store_true', help='Include policy assignments either inherited from parent scope or at child scope.') c.argument('display_name', help='Display name of the policy assignment.') c.argument('description', help='Description of the policy assignment.', min_api='2016-12-01') c.argument('policy', help='Name or id of the policy definition.', completer=get_policy_completion_list) c.argument('params', options_list=['--params', '-p'], help='JSON formatted string or a path to a file or uri with parameter values of the policy rule.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01') with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2017-06-01-preview') as c: c.argument('policy_set_definition', options_list=['--policy-set-definition', '-d'], help='Name or id of the policy set definition.') c.argument('sku', options_list=['--sku', '-s'], help='policy sku.', arg_type=get_enum_type(['free', 'standard']), deprecate_info=c.deprecate(hide=True)) c.argument('notscopes', options_list='--not-scopes', nargs='+') with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, arg_group='Managed Identity', min_api='2018-05-01') as c: c.argument('assign_identity', nargs='*', help=""Assigns a system assigned identity to the policy assignment. This argument will be deprecated, please use --mi-system-assigned instead"", deprecate_info=c.deprecate(hide=True)) c.argument('mi_system_assigned', action='store_true', help='Provide this flag to use system assigned identity for policy assignment. Check out help for more examples') c.argument('mi_user_assigned', min_api='2021-06-01', help='UserAssigned Identity Id to be used for policy assignment. Check out help for more examples') c.argument('identity_scope', arg_type=identity_scope_type) c.argument('identity_role', arg_type=identity_role_type) with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2019-06-01') as c: c.argument('enforcement_mode', options_list=['--enforcement-mode', '-e'], help='Enforcement mode of the policy assignment, e.g. Default, DoNotEnforce. Please visit https://aka.ms/azure-policyAssignment-enforcement-mode for more information.', arg_type=get_enum_type(EnforcementMode)) with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy assignment.') with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), help='The location of the policy assignment. 
Only required when utilizing managed identity.') with self.argument_context('policy assignment identity', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c: c.argument('mi_system_assigned', action='store_true', options_list=['--system-assigned'], help='Provide this flag to use system assigned identity for policy assignment. Check out help for more examples') c.argument('mi_user_assigned', options_list=['--user-assigned'], min_api='2021-06-01', help='UserAssigned Identity Id to be used for policy assignment. Check out help for more examples') c.argument('identity_scope', arg_type=identity_scope_type) c.argument('identity_role', arg_type=identity_role_type) with self.argument_context('policy assignment non-compliance-message', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2020-09-01') as c: c.argument('message', options_list=['--message', '-m'], help='Message that will be shown when a resource is denied by policy or evaluation details are inspected.') c.argument('policy_definition_reference_id', options_list=['--policy-definition-reference-id', '-r'], help='Policy definition reference ID within the assigned initiative (policy set) that the message applies to.') with self.argument_context('policy set-definition', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('policy_set_definition_name', arg_type=existing_policy_set_definition_name_type) c.argument('display_name', help='Display name of policy set definition.') c.argument('description', help='Description of policy set definition.') c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter()) c.argument('definitions', help='JSON formatted string or a path to a file or uri containing definitions.', type=file_type, completer=FilesCompleter()) c.argument('definition_groups', min_api='2019-09-01', help='JSON formatted string or a path to a file or uri containing policy definition groups. 
Groups are used to organize policy definitions within a policy set.', type=file_type, completer=FilesCompleter()) c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.') c.argument('management_group', arg_type=management_group_name_type) c.argument('subscription', arg_type=subscription_type) c.ignore('_subscription') # disable global subscription with self.argument_context('policy set-definition create', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy set definition.') with self.argument_context('policy exemption', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.ignore('_subscription') c.argument('name', options_list=['--name', '-n'], completer=get_policy_exemption_completion_list, help='Name of the policy exemption.') c.argument('scope', help='Scope to which this policy exemption applies.') c.argument('disable_scope_strict_match', options_list=['--disable-scope-strict-match', '-i'], action='store_true', help='Include policy exemptions either inherited from parent scope or at child scope.') c.argument('display_name', help='Display name of the policy exemption.') c.argument('description', help='Description of policy exemption.') c.argument('exemption_category', options_list=['--exemption-category', '-e'], help='The policy exemption category of the policy exemption', arg_type=get_enum_type(ExemptionCategory)) c.argument('policy_definition_reference_ids', nargs='+', options_list=['--policy-definition-reference-ids', '-r'], help='The policy definition reference ids to exempt in the initiative (policy set).') c.argument('expires_on', help='The expiration date and time (in UTC ISO 8601 format yyyy-MM-ddTHH:mm:ssZ) of the policy exemption.') c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.') with self.argument_context('policy exemption create', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy exemption.') c.argument('policy_assignment', options_list=['--policy-assignment', '-a'], help='The referenced policy assignment Id for the policy exemption.') with self.argument_context('group') as c: c.argument('tag', tag_type) c.argument('tags', tags_type) c.argument('resource_group_name', resource_group_name_type, options_list=['--name', '-n', '--resource-group', '-g']) with self.argument_context('group deployment') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list) c.argument('deployment_name', arg_type=deployment_name_type) c.argument('template_file', arg_type=deployment_template_file_type) c.argument('template_uri', arg_type=deployment_template_uri_type) c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'), help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)') c.argument('parameters', arg_type=deployment_parameters_type) c.argument('rollback_on_error', nargs='?', action=RollbackAction, help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.') with self.argument_context('group deployment create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', 
arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'], help='Auxiliary subscriptions which will be used during deployment across tenants.', deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants')) c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'], help='Auxiliary tenants which will be used during deployment across tenants.') c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('group deployment validate') as c: c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('group deployment list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('group deployment operation show') as c: c.argument('operation_ids', nargs='+', help='A list of operation ids to show') with self.argument_context('deployment') as c: c.argument('deployment_name', arg_type=deployment_name_type) c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) c.argument('template_file', arg_type=deployment_template_file_type) c.argument('template_uri', arg_type=deployment_template_uri_type) c.argument('template_spec', arg_type=deployment_template_spec_type) c.argument('query_string', arg_type=deployment_query_string_type) c.argument('parameters', arg_type=deployment_parameters_type) with self.argument_context('deployment create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type) c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type) c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. 
Applicable when --confirm-with-what-if is set."") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment operation') as c: c.argument('operation_ids', nargs='+', help='A list of operation ids to show') with self.argument_context('deployment list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment sub') as c: c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) with self.argument_context('deployment sub create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type) c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type) c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set."") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment sub what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) with self.argument_context('deployment sub validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment sub list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment group') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list, required=True) c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'), help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)') c.argument('rollback_on_error', nargs='?', action=RollbackAction, help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.') with self.argument_context('deployment group create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, 
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'], help='Auxiliary subscriptions which will be used during deployment across tenants.', deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants')) c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'], help='Auxiliary tenants which will be used during deployment across tenants.') c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type) c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type) c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set."") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment group what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'], help='Auxiliary tenants which will be used during deployment across tenants.') c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) c.ignore(""rollback_on_error"") with self.argument_context('deployment group validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment group list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment mg') as c: c.argument('management_group_id', arg_type=management_group_id_type) c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) with self.argument_context('deployment mg create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api=""2019-10-01"") c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type, min_api=""2019-10-01"") c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. 
Applicable when --confirm-with-what-if is set."", min_api=""2019-10-01"") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment mg what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) with self.argument_context('deployment mg validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment mg list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment operation mg') as c: c.argument('management_group_id', arg_type=management_group_id_type) with self.argument_context('deployment tenant') as c: c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) with self.argument_context('deployment tenant create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api=""2019-10-01"") c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type, min_api=""2019-10-01"") c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. 
Applicable when --confirm-with-what-if is set."", min_api=""2019-10-01"") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment tenant what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) with self.argument_context('deployment tenant validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment tenant list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('group export') as c: c.argument('include_comments', action='store_true') c.argument('include_parameter_default_value', action='store_true') c.argument('skip_resource_name_params', action='store_true') c.argument('skip_all_params', action='store_true') c.argument('resource_ids', nargs='+', options_list='--resource-ids') with self.argument_context('group create') as c: c.argument('rg_name', options_list=['--name', '--resource-group', '-n', '-g'], help='name of the new resource group', completer=None, local_context_attribute=LocalContextAttribute( name='resource_group_name', actions=[LocalContextAction.SET], scopes=[ALL])) c.argument('managed_by', min_api='2016-09-01', help='The ID of the resource that manages this resource group.') with self.argument_context('group delete') as c: c.argument('resource_group_name', resource_group_name_type, options_list=['--name', '-n', '--resource-group', '-g'], local_context_attribute=None) c.argument('force_deletion_types', options_list=['--force-deletion-types', '-f'], arg_type=get_enum_type(['Microsoft.Compute/virtualMachines', 'Microsoft.Compute/virtualMachineScaleSets']), min_api='2021-04-01', help='The resource types you want to force delete.') with self.argument_context('tag') as c: c.argument('tag_name', tag_name_type) c.argument('tag_value', tag_value_type) c.argument('resource_id', tag_resource_id_type) c.argument('tags', tags_type) c.argument('operation', arg_type=get_enum_type([item.value for item in list(TagUpdateOperation)]), help='The update operation: options include Merge, Replace and Delete.') with self.argument_context('lock') as c: c.argument('lock_name', options_list=['--name', '-n'], validator=validate_lock_parameters) c.argument('level', arg_type=get_enum_type(LockLevel), options_list=['--lock-type', '-t'], help='The type of lock restriction.') c.argument('parent_resource_path', resource_parent_type) c.argument('resource_provider_namespace', resource_namespace_type) c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list) c.argument('resource_name', options_list=['--resource', '--resource-name'], help='Name or ID of the resource being locked. If an ID is given, other resource arguments should not be given.') c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). 
If provided, no other ""Resource Id"" arguments should be specified.') c.argument('resource_group', resource_group_name_type, validator=validate_lock_parameters) with self.argument_context('resource lock') as c: c.argument('resource_group', resource_group_name_type) c.argument('resource_name', options_list=['--resource', '--resource-name'], help='If an ID is given, other resource arguments should not be given.', validator=validate_resource_lock) with self.argument_context('group lock') as c: c.argument('resource_group', resource_group_name_type, validator=validate_group_lock, id_part=None) with self.argument_context('group lock create') as c: c.argument('resource_group', required=True) with self.argument_context('account lock') as c: c.argument('resource_group', ignore_type, validator=validate_subscription_lock) for scope in ['account', 'group']: with self.argument_context('{} lock'.format(scope)) as c: c.ignore('resource_provider_namespace', 'parent_resource_path', 'resource_type', 'resource_name') for scope in ['lock', 'account lock', 'group lock', 'resource lock']: with self.argument_context(scope) as c: c.argument('lock_name', options_list=['--name', '-n'], help='Name of the lock') c.argument('level', options_list=['--lock-type', '-t'], arg_type=get_enum_type([LockLevel.can_not_delete, LockLevel.read_only]), help='The type of lock restriction.') c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other ""Resource Id"" arguments should be specified.') c.argument('notes', help='Notes about this lock.') with self.argument_context('managedapp') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application', id_part='resource_group') c.argument('application_name', options_list=['--name', '-n'], id_part='name') c.argument('tags', tags_type) with self.argument_context('managedapp definition') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application definition', id_part='resource_group') c.argument('application_definition_name', options_list=['--name', '-n'], id_part='name') with self.argument_context('managedapp create') as c: c.argument('name', options_list=['--name', '-n'], help='name of the new managed application', completer=None) c.argument('location', help='the managed application location') c.argument('managedapp_definition_id', options_list=['--managedapp-definition-id', '-d'], help='the full qualified managed application definition id') c.argument('managedby_resource_group_id', options_list=['--managed-rg-id', '-m'], help='the resource group managed by the managed application') c.argument('parameters', help='JSON formatted string or a path to a file with such content', type=file_type) for operation in ['create', 'update']: with self.argument_context('managedapp definition {}'.format(operation)) as c: c.argument('lock_level', arg_type=get_enum_type(ApplicationLockLevel), help='The type of lock restriction.') c.argument('authorizations', options_list=['--authorizations', '-a'], nargs='+', help=""space-separated authorization pairs in a format of `:`"") c.argument('create_ui_definition', options_list=['--create-ui-definition', '-c'], help='JSON formatted string or a path to a file with such content', type=file_type) c.argument('main_template', options_list=['--main-template', '-t'], help='JSON formatted string or a path to a file with such content', type=file_type) with 
self.argument_context('account') as c: c.argument('subscription', options_list=['--subscription', '-s'], help='Name or ID of subscription.', completer=get_subscription_id_list) c.ignore('_subscription') # hide global subscription parameter with self.argument_context('account management-group') as c: c.argument('group_name', options_list=['--name', '-n']) c.argument('no_register', action='store_true', help='Skip registration for resource provider Microsoft.Management') with self.argument_context('account management-group show') as c: c.argument('expand', options_list=['--expand', '-e'], action='store_true') c.argument('recurse', options_list=['--recurse', '-r'], action='store_true') with self.argument_context('account management-group create') as c: c.argument('display_name', options_list=['--display-name', '-d']) c.argument('parent', options_list=['--parent', '-p']) with self.argument_context('account management-group update') as c: c.argument('display_name', options_list=['--display-name', '-d']) c.argument('parent_id', options_list=['--parent', '-p']) with self.argument_context('account management-group hierarchy-settings create') as c: c.argument('default_management_group', options_list=['--default-management-group', '-m']) c.argument('require_authorization_for_group_creation', options_list=['--require-authorization-for-group-creation', '-r']) with self.argument_context('account management-group hierarchy-settings update') as c: c.argument('default_management_group', options_list=['--default-management-group', '-m']) c.argument('require_authorization_for_group_creation', options_list=['--require-authorization-for-group-creation', '-r']) with self.argument_context('ts') as c: c.argument('name', options_list=['--name', '-n'], help='The name of the template spec.') c.argument('version', options_list=['--version', '-v'], help='The template spec version.') with self.argument_context('ts create') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.') c.argument('template_file', arg_type=deployment_template_file_type) c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.') c.argument('location', options_list=['--location', '-l'], help='The location to store the template-spec and template-spec version(s). 
Cannot be changed after creation.') c.argument('display_name', arg_type=ts_display_name_type) c.argument('description', arg_type=ts_description_type) c.argument('version_description', arg_type=ts_version_description_type) c.argument('tags', tags_type) c.argument('no_prompt', options_list=['--yes', '-y'], action='store_true', help='Do not prompt for confirmation') with self.argument_context('ts update') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.') c.argument('template_spec', arg_type=deployment_template_spec_type) c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.') c.argument('template_file', arg_type=deployment_template_file_type) c.argument('display_name', arg_type=ts_display_name_type) c.argument('description', arg_type=ts_description_type) c.argument('version_description', arg_type=ts_version_description_type) c.argument('tags', tags_type) with self.argument_context('ts show') as c: c.argument('template_spec', arg_type=deployment_template_spec_type) with self.argument_context('ts export') as c: c.argument('output_folder', options_list=['--output-folder'], help='Existing folder to output export(s).') c.argument('template_spec', arg_type=deployment_template_spec_type) with self.argument_context('ts delete') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group where the template spec or template spec version is stored.') c.argument('template_spec', arg_type=deployment_template_spec_type) with self.argument_context('ts list') as c: c.argument('resource_group', arg_type=resource_group_name_type) with self.argument_context('bicep build') as c: c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep file to build in the file system."")) c.argument('outdir', arg_type=CLIArgumentType(options_list=['--outdir'], completer=DirectoriesCompleter(), help=""When set, saves the output at the specified directory."")) c.argument('outfile', arg_type=CLIArgumentType(options_list=['--outfile'], completer=FilesCompleter(), help=""When set, saves the output as the specified file path."")) c.argument('stdout', arg_type=CLIArgumentType(options_list=['--stdout'], action='store_true', help=""When set, prints all output to stdout instead of corresponding files."")) c.argument('no_restore', arg_type=CLIArgumentType(options_list=['--no-restore'], action='store_true', help=""When set, builds the bicep file without restoring external modules."")) with self.argument_context('bicep decompile') as c: c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the ARM template to decompile in the file system."")) c.argument('force', arg_type=CLIArgumentType(options_list=['--force'], action='store_true', help=""Allows overwriting the output file if it exists."")) with self.argument_context('bicep restore') as c: c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep file to restore external modules for."")) c.argument('force', arg_type=CLIArgumentType(options_list=['--force'], action='store_true', help=""Allows overwriting the cached external modules."")) with self.argument_context('bicep publish') as c: c.argument('file', 
arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep module file to publish in the file system."")) c.argument('target', arg_type=CLIArgumentType(options_list=['--target', '-t'], help=""The target location where the Bicep module will be published."")) with self.argument_context('bicep install') as c: c.argument('version', options_list=['--version', '-v'], help='The version of Bicep CLI to be installed. Default to the latest if not specified.') c.argument('target_platform', arg_type=bicep_target_platform_type) with self.argument_context('bicep upgrade') as c: c.argument('target_platform', arg_type=bicep_target_platform_type) with self.argument_context('bicep generate-params') as c: c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep file to generate the parameters file from in the file system."")) c.argument('outdir', arg_type=CLIArgumentType(options_list=['--outdir'], completer=DirectoriesCompleter(), help=""When set, saves the output at the specified directory."")) c.argument('outfile', arg_type=CLIArgumentType(options_list=['--outfile'], completer=FilesCompleter(), help=""When set, saves the output as the specified file path."")) c.argument('stdout', arg_type=CLIArgumentType(options_list=['--stdout'], action='store_true', help=""When set, prints all output to stdout instead of corresponding files."")) c.argument('no_restore', arg_type=CLIArgumentType(options_list=['--no-restore'], action='store_true', help=""When set, generates the parameters file without restoring external modules."")) with self.argument_context('resourcemanagement private-link create') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') c.argument('name', options_list=['--name', '-n'], help='The name of the resource management private link.') c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group, help='the region to create the resource management private link') with self.argument_context('resourcemanagement private-link show') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') c.argument('name', options_list=['--name', '-n'], help='The name of the resource management private link.') with self.argument_context('resourcemanagement private-link list') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') with self.argument_context('resourcemanagement private-link delete') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') c.argument('name', options_list=['--name', '-n'], help='The name of the resource management private link.') with self.argument_context('private-link association create') as c: c.argument('management_group_id', arg_type=management_group_id_type) c.argument('name', options_list=['--name', '-n'], help='The name of the private link association') c.argument('privatelink', options_list=['--privatelink', '-p'], help='The name of the private link') c.argument('public_network_access', options_list=['--public-network-access', '-a'], arg_type=get_enum_type(['enabled', 'disabled']), help='restrict traffic to private link') with self.argument_context('private-link association show') as c: c.argument('management_group_id', arg_type=management_group_id_type) 
c.argument('name', options_list=['--name', '-n'], help='The name of the private link association') with self.argument_context('private-link association list') as c: c.argument('management_group_id', arg_type=management_group_id_type) with self.argument_context('private-link association delete') as c: c.argument('management_group_id', arg_type=management_group_id_type) c.argument('name', options_list=['--name', '-n'], help='The name of the private link association') ","def load_arguments(self, _): from argcomplete.completers import FilesCompleter from argcomplete.completers import DirectoriesCompleter from azure.mgmt.resource.locks.models import LockLevel from azure.mgmt.resource.managedapplications.models import ApplicationLockLevel from azure.mgmt.resource.policy.models import (ExemptionCategory, EnforcementMode) from azure.cli.core.commands.validators import get_default_location_from_resource_group from azure.cli.core.api import get_subscription_id_list from azure.cli.core.commands.parameters import ( resource_group_name_type, get_location_type, tag_type, tags_type, get_resource_group_completion_list, no_wait_type, file_type, get_enum_type, get_three_state_flag) from azure.cli.core.profiles import ResourceType from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL from knack.arguments import ignore_type, CLIArgumentType from azure.cli.command_modules.resource._completers import ( get_policy_completion_list, get_policy_set_completion_list, get_policy_assignment_completion_list, get_policy_exemption_completion_list, get_resource_types_completion_list, get_providers_completion_list) from azure.cli.command_modules.resource._validators import ( validate_lock_parameters, validate_resource_lock, validate_group_lock, validate_subscription_lock, validate_metadata, RollbackAction) from azure.cli.command_modules.resource.parameters import TagUpdateOperation DeploymentMode, WhatIfResultFormat, ChangeType = self.get_models('DeploymentMode', 'WhatIfResultFormat', 'ChangeType') # BASIC PARAMETER CONFIGURATION resource_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The resource name. (Ex: myC)') resource_type_type = CLIArgumentType(help=""The resource type (Ex: 'resC'). Can also accept namespace/type format (Ex: 'Microsoft.Provider/resC')"") resource_namespace_type = CLIArgumentType(options_list='--namespace', completer=get_providers_completion_list, help=""Provider namespace (Ex: 'Microsoft.Provider')"") resource_parent_type = CLIArgumentType(required=False, options_list=['--parent'], help=""The parent path (Ex: 'resA/myA/resB/myB')"") existing_policy_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_completion_list, help='The policy definition name.') existing_policy_set_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_set_completion_list, help='The policy set definition name.') subscription_type = CLIArgumentType(options_list='--subscription', FilesCompleter=get_subscription_id_list, help='The subscription id of the policy [set] definition.') management_group_name_type = CLIArgumentType(options_list='--management-group', help='The name of the management group of the policy [set] definition. 
This parameter is required if your policy set is scoped to a management group.') identity_scope_type = CLIArgumentType(help=""Scope that the system assigned identity can access"") identity_role_type = CLIArgumentType(options_list=['--role'], help=""Role name or id that will be assigned to the managed identity"") extended_json_format_type = CLIArgumentType(options_list=['--handle-extended-json-format', '-j'], action='store_true', help='Support to handle extended template content including multiline and comments in deployment') deployment_name_type = CLIArgumentType(options_list=['--name', '-n'], required=True, help='The deployment name.') deployment_create_name_type = CLIArgumentType(options_list=['--name', '-n'], required=False, help='The deployment name. Default to template file base name') management_group_id_type = CLIArgumentType(options_list=['--management-group-id', '-m'], required=True, help='The management group id.') deployment_template_file_type = CLIArgumentType(options_list=['--template-file', '-f'], completer=FilesCompleter(), type=file_type, help=""a path to a template file or Bicep file in the file system"") deployment_template_uri_type = CLIArgumentType(options_list=['--template-uri', '-u'], help='a uri to a remote template file') deployment_template_spec_type = CLIArgumentType(options_list=['--template-spec', '-s'], min_api='2019-06-01', help=""The template spec resource id."") deployment_query_string_type = CLIArgumentType(options_list=['--query-string', '-q'], help=""The query string (a SAS token) to be used with the template-uri in the case of linked templates."") deployment_parameters_type = CLIArgumentType(options_list=['--parameters', '-p'], action='append', nargs='+', completer=FilesCompleter(), help='the deployment parameters') filter_type = CLIArgumentType(options_list=['--filter'], is_preview=True, help='Filter expression using OData notation. You can use --filter ""provisioningState eq \'{state}\'"" to filter provisioningState. ' 'To get more information, please visit https://docs.microsoft.com/rest/api/resources/deployments/listatsubscriptionscope#uri-parameters') no_prompt = CLIArgumentType(arg_type=get_three_state_flag(), help='The option to disable the prompt of missing parameters for ARM template. ' 'When the value is true, the prompt requiring users to provide missing parameter will be ignored. The default value is false.') deployment_what_if_type = CLIArgumentType(options_list=['--what-if', '-w'], action='store_true', help='Instruct the command to run deployment What-If.', min_api='2019-07-01') deployment_what_if_proceed_if_no_change_type = CLIArgumentType(options_list=['--proceed-if-no-change'], action='store_true', help='Instruct the command to execute the deployment if the What-If result contains no resource changes. Applicable when --confirm-with-what-if is set.', min_api='2019-07-01') deployment_what_if_result_format_type = CLIArgumentType(options_list=['--result-format', '-r'], arg_type=get_enum_type(WhatIfResultFormat, ""FullResourcePayloads""), min_api='2019-07-01') deployment_what_if_no_pretty_print_type = CLIArgumentType(options_list=['--no-pretty-print'], action='store_true', help='Disable pretty-print for What-If results. When set, the output format type will be used.') deployment_what_if_confirmation_type = CLIArgumentType(options_list=['--confirm-with-what-if', '-c'], action='store_true', help='Instruct the command to run deployment What-If before executing the deployment. 
It then prompts you to acknowledge resource changes before it continues.', min_api='2019-07-01') deployment_what_if_exclude_change_types_type = CLIArgumentType(nargs=""+"", options_list=['--exclude-change-types', '-x'], arg_type=get_enum_type(ChangeType), help='Space-separated list of resource change types to be excluded from What-If results.', min_api='2019-07-01') tag_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The tag name.') tag_value_type = CLIArgumentType(options_list='--value', help='The tag value.') tag_resource_id_type = CLIArgumentType(options_list='--resource-id', help='The resource identifier for the tagged entity. A resource, a resource group or a subscription may be tagged.', min_api='2019-10-01') latest_include_preview_type = CLIArgumentType(options_list=['--latest-include-preview', '-v'], is_preview=True, action='store_true', arg_group='Resource Id', help='Indicate that the latest api-version will be used regardless of whether it is preview version (like 2020-01-01-preview) or not. ' 'For example, if the supported api-version of resource provider is 2020-01-01-preview and 2019-01-01: ' 'when passing in this parameter it will take the latest version 2020-01-01-preview, otherwise it will take the latest stable version 2019-01-01 without passing in this parameter') ts_display_name_type = CLIArgumentType(options_list=['--display-name', '-d'], help='The display name of the template spec') ts_description_type = CLIArgumentType(options_list=['--description'], help='The description of the parent template spec.') ts_version_description_type = CLIArgumentType(options_list=['--version-description'], help='The description of the template spec version.') ui_form_definition_file_type = CLIArgumentType(options_list=['--ui-form-definition'], completer=FilesCompleter(), type=file_type, help=""A path to a uiFormDefinition file in the file system"") bicep_target_platform_type = CLIArgumentType(options_list=['--target-platform', '-t'], arg_type=get_enum_type( [""win-x64"", ""linux-musl-x64"", ""linux-x64"", ""osx-x64""]), help=""The platform the Bicep CLI will be running on. Set this to skip automatic platform detection if it does not work properly."") _PROVIDER_HELP_TEXT = 'the resource namespace, aka \'provider\'' with self.argument_context('resource') as c: c.argument('no_wait', no_wait_type) c.argument('resource_group_name', resource_group_name_type, arg_group='Resource Id') c.ignore('resource_id') c.argument('resource_name', resource_name_type, arg_group='Resource Id') c.argument('api_version', help='The api version of the resource (omit for the latest stable version)', required=False, arg_group='Resource Id') c.argument('resource_provider_namespace', resource_namespace_type, arg_group='Resource Id') c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list, arg_group='Resource Id') c.argument('parent_resource_path', resource_parent_type, arg_group='Resource Id') c.argument('tag', tag_type) c.argument('tags', tags_type) c.argument('resource_ids', nargs='+', options_list=['--ids'], help='One or more resource IDs (space-delimited). 
If provided, no other ""Resource Id"" arguments should be specified.', arg_group='Resource Id') c.argument('include_response_body', arg_type=get_three_state_flag(), help='Use if the default command output doesn\'t capture all of the property data.') c.argument('latest_include_preview', latest_include_preview_type) with self.argument_context('resource list') as c: c.argument('name', resource_name_type) with self.argument_context('resource move') as c: c.argument('ids', nargs='+') with self.argument_context('resource invoke-action') as c: c.argument('action', help='The action that will be invoked on the specified resource') c.argument('request_body', help='JSON encoded parameter arguments for the action that will be passed along in the post request body. Use @{file} to load from a file.') with self.argument_context('resource create') as c: c.argument('resource_id', options_list=['--id'], help='Resource ID.', action=None) c.argument('properties', options_list=['--properties', '-p'], help='a JSON-formatted string containing resource properties') c.argument('is_full_object', action='store_true', help='Indicate that the properties object includes other options such as location, tags, sku, and/or plan.') with self.argument_context('resource link') as c: c.argument('target_id', options_list=['--target', c.deprecate(target='--target-id', redirect='--target', hide=True)], help='Fully-qualified resource ID of the resource link target.') c.argument('link_id', options_list=['--link', c.deprecate(target='--link-id', redirect='--link', hide=True)], help='Fully-qualified resource ID of the resource link.') c.argument('notes', help='Notes for the link.') c.argument('scope', help='Fully-qualified scope for retrieving links.') c.argument('filter_string', options_list=['--filter', c.deprecate(target='--filter-string', redirect='--filter', hide=True)], help='Filter string for limiting results.') with self.argument_context('resource tag') as c: c.argument('is_incremental', action='store_true', options_list=['--is-incremental', '-i'], help='The option to add tags incrementally without deleting the original tags. If the key of new tag and original tag are duplicated, the original value will be overwritten.') with self.argument_context('resource wait') as c: c.ignore('latest_include_preview') with self.argument_context('provider') as c: c.ignore('top') c.argument('resource_provider_namespace', options_list=['--namespace', '-n'], completer=get_providers_completion_list, help=_PROVIDER_HELP_TEXT) with self.argument_context('provider register') as c: c.argument('mg', help=""The management group id to register."", options_list=['--management-group-id', '-m']) c.argument('accept_terms', action='store_true', is_preview=True, help=""Accept market place terms and RP terms for RPaaS. 
Required when registering RPs from RPaaS, such as 'Microsoft.Confluent' and 'Microsoft.Datadog'."", deprecate_info=c.deprecate(hide=True)) c.argument('wait', action='store_true', help='wait for the registration to finish') c.argument('consent_to_permissions', options_list=['--consent-to-permissions', '-c'], action='store_true', help='A value indicating whether authorization is consented or not.') with self.argument_context('provider unregister') as c: c.argument('wait', action='store_true', help='wait for unregistration to finish') with self.argument_context('provider operation') as c: c.argument('api_version', help=""The api version of the 'Microsoft.Authorization/providerOperations' resource (omit for the latest stable version)"") with self.argument_context('feature') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT) c.argument('feature_name', options_list=['--name', '-n'], help='the feature name') with self.argument_context('feature list') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT) with self.argument_context('feature registration') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT) c.argument('feature_name', options_list=['--name', '-n'], help='the feature name') with self.argument_context('feature registration list') as c: c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT) with self.argument_context('policy') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group where the policy will be applied') with self.argument_context('policy definition', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('policy_definition_name', arg_type=existing_policy_definition_name_type) c.argument('rules', help='JSON formatted string or a path to a file with such content', type=file_type, completer=FilesCompleter()) c.argument('display_name', help='Display name of policy definition.') c.argument('description', help='Description of policy definition.') c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01') c.argument('metadata', min_api='2017-06-01-preview', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.') c.argument('management_group', arg_type=management_group_name_type) c.argument('mode', options_list=['--mode', '-m'], help='Mode of the policy definition, e.g. All, Indexed. 
Please visit https://aka.ms/azure-policy-mode for more information.', min_api='2016-12-01') c.argument('subscription', arg_type=subscription_type) c.ignore('_subscription') # disable global subscription with self.argument_context('policy definition create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy definition.') with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], completer=get_policy_assignment_completion_list, help='Name of the policy assignment.') c.argument('scope', help='Scope to which this policy assignment applies.') c.argument('disable_scope_strict_match', action='store_true', help='Include policy assignments either inherited from parent scope or at child scope.') c.argument('display_name', help='Display name of the policy assignment.') c.argument('description', help='Description of the policy assignment.', min_api='2016-12-01') c.argument('policy', help='Name or id of the policy definition.', completer=get_policy_completion_list) c.argument('params', options_list=['--params', '-p'], help='JSON formatted string or a path to a file or uri with parameter values of the policy rule.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01') with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2017-06-01-preview') as c: c.argument('policy_set_definition', options_list=['--policy-set-definition', '-d'], help='Name or id of the policy set definition.') c.argument('sku', options_list=['--sku', '-s'], help='policy sku.', arg_type=get_enum_type(['free', 'standard']), deprecate_info=c.deprecate(hide=True)) c.argument('notscopes', options_list='--not-scopes', nargs='+') with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, arg_group='Managed Identity', min_api='2018-05-01') as c: c.argument('assign_identity', nargs='*', help=""Assigns a system assigned identity to the policy assignment. This argument will be deprecated, please use --mi-system-assigned instead"", deprecate_info=c.deprecate(hide=True)) c.argument('mi_system_assigned', action='store_true', help='Provide this flag to use system assigned identity for policy assignment. Check out help for more examples') c.argument('mi_user_assigned', min_api='2021-06-01', help='UserAssigned Identity Id to be used for policy assignment. Check out help for more examples') c.argument('identity_scope', arg_type=identity_scope_type) c.argument('identity_role', arg_type=identity_role_type) with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2019-06-01') as c: c.argument('enforcement_mode', options_list=['--enforcement-mode', '-e'], help='Enforcement mode of the policy assignment, e.g. Default, DoNotEnforce. Please visit https://aka.ms/azure-policyAssignment-enforcement-mode for more information.', arg_type=get_enum_type(EnforcementMode)) with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy assignment.') with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), help='The location of the policy assignment. 
Only required when utilizing managed identity.') with self.argument_context('policy assignment identity', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c: c.argument('mi_system_assigned', action='store_true', options_list=['--system-assigned'], help='Provide this flag to use system assigned identity for policy assignment. Check out help for more examples') c.argument('mi_user_assigned', options_list=['--user-assigned'], min_api='2021-06-01', help='UserAssigned Identity Id to be used for policy assignment. Check out help for more examples') c.argument('identity_scope', arg_type=identity_scope_type) c.argument('identity_role', arg_type=identity_role_type) with self.argument_context('policy assignment non-compliance-message', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2020-09-01') as c: c.argument('message', options_list=['--message', '-m'], help='Message that will be shown when a resource is denied by policy or evaluation details are inspected.') c.argument('policy_definition_reference_id', options_list=['--policy-definition-reference-id', '-r'], help='Policy definition reference ID within the assigned initiative (policy set) that the message applies to.') with self.argument_context('policy set-definition', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('policy_set_definition_name', arg_type=existing_policy_set_definition_name_type) c.argument('display_name', help='Display name of policy set definition.') c.argument('description', help='Description of policy set definition.') c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter()) c.argument('definitions', help='JSON formatted string or a path to a file or uri containing definitions.', type=file_type, completer=FilesCompleter()) c.argument('definition_groups', min_api='2019-09-01', help='JSON formatted string or a path to a file or uri containing policy definition groups. 
Groups are used to organize policy definitions within a policy set.', type=file_type, completer=FilesCompleter()) c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.') c.argument('management_group', arg_type=management_group_name_type) c.argument('subscription', arg_type=subscription_type) c.ignore('_subscription') # disable global subscription with self.argument_context('policy set-definition create', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy set definition.') with self.argument_context('policy exemption', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.ignore('_subscription') c.argument('name', options_list=['--name', '-n'], completer=get_policy_exemption_completion_list, help='Name of the policy exemption.') c.argument('scope', help='Scope to which this policy exemption applies.') c.argument('disable_scope_strict_match', options_list=['--disable-scope-strict-match', '-i'], action='store_true', help='Include policy exemptions either inherited from parent scope or at child scope.') c.argument('display_name', help='Display name of the policy exemption.') c.argument('description', help='Description of policy exemption.') c.argument('exemption_category', options_list=['--exemption-category', '-e'], help='The policy exemption category of the policy exemption', arg_type=get_enum_type(ExemptionCategory)) c.argument('policy_definition_reference_ids', nargs='+', options_list=['--policy-definition-reference-ids', '-r'], help='The policy definition reference ids to exempt in the initiative (policy set).') c.argument('expires_on', help='The expiration date and time (in UTC ISO 8601 format yyyy-MM-ddTHH:mm:ssZ) of the policy exemption.') c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.') with self.argument_context('policy exemption create', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c: c.argument('name', options_list=['--name', '-n'], help='Name of the new policy exemption.') c.argument('policy_assignment', options_list=['--policy-assignment', '-a'], help='The referenced policy assignment Id for the policy exemption.') with self.argument_context('group') as c: c.argument('tag', tag_type) c.argument('tags', tags_type) c.argument('resource_group_name', resource_group_name_type, options_list=['--name', '-n', '--resource-group', '-g']) with self.argument_context('group deployment') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list) c.argument('deployment_name', arg_type=deployment_name_type) c.argument('template_file', arg_type=deployment_template_file_type) c.argument('template_uri', arg_type=deployment_template_uri_type) c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'), help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)') c.argument('parameters', arg_type=deployment_parameters_type) c.argument('rollback_on_error', nargs='?', action=RollbackAction, help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.') with self.argument_context('group deployment create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', 
arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'], help='Auxiliary subscriptions which will be used during deployment across tenants.', deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants')) c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'], help='Auxiliary tenants which will be used during deployment across tenants.') c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('group deployment validate') as c: c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('group deployment list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('group deployment operation show') as c: c.argument('operation_ids', nargs='+', help='A list of operation ids to show') with self.argument_context('deployment') as c: c.argument('deployment_name', arg_type=deployment_name_type) c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) c.argument('template_file', arg_type=deployment_template_file_type) c.argument('template_uri', arg_type=deployment_template_uri_type) c.argument('template_spec', arg_type=deployment_template_spec_type) c.argument('query_string', arg_type=deployment_query_string_type) c.argument('parameters', arg_type=deployment_parameters_type) with self.argument_context('deployment create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type) c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type) c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. 
Applicable when --confirm-with-what-if is set."") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment operation') as c: c.argument('operation_ids', nargs='+', help='A list of operation ids to show') with self.argument_context('deployment list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment sub') as c: c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) with self.argument_context('deployment sub create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type) c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type) c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set."") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment sub what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) with self.argument_context('deployment sub validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment sub list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment group') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list, required=True) c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'), help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)') c.argument('rollback_on_error', nargs='?', action=RollbackAction, help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.') with self.argument_context('deployment group create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, 
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'], help='Auxiliary subscriptions which will be used during deployment across tenants.', deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants')) c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'], help='Auxiliary tenants which will be used during deployment across tenants.') c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type) c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type) c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set."") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment group what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'], help='Auxiliary tenants which will be used during deployment across tenants.') c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) c.ignore(""rollback_on_error"") with self.argument_context('deployment group validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment group list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment mg') as c: c.argument('management_group_id', arg_type=management_group_id_type) c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) with self.argument_context('deployment mg create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api=""2019-10-01"") c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type, min_api=""2019-10-01"") c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. 
Applicable when --confirm-with-what-if is set."", min_api=""2019-10-01"") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment mg what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) with self.argument_context('deployment mg validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment mg list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('deployment operation mg') as c: c.argument('management_group_id', arg_type=management_group_id_type) with self.argument_context('deployment tenant') as c: c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True) with self.argument_context('deployment tenant create') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api=""2019-10-01"") c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'], arg_type=deployment_what_if_result_format_type, min_api=""2019-10-01"") c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'], arg_type=deployment_what_if_exclude_change_types_type, help=""Space-separated list of resource change types to be excluded from What-If results. 
Applicable when --confirm-with-what-if is set."", min_api=""2019-10-01"") c.argument('what_if', arg_type=deployment_what_if_type) c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type) with self.argument_context('deployment tenant what-if') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('no_prompt', arg_type=no_prompt) c.argument('result_format', arg_type=deployment_what_if_result_format_type) c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type) c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type) with self.argument_context('deployment tenant validate') as c: c.argument('deployment_name', arg_type=deployment_create_name_type) c.argument('handle_extended_json_format', arg_type=extended_json_format_type, deprecate_info=c.deprecate(target='--handle-extended-json-format/-j')) c.argument('no_prompt', arg_type=no_prompt) with self.argument_context('deployment tenant list') as c: c.argument('filter_string', arg_type=filter_type) with self.argument_context('group export') as c: c.argument('include_comments', action='store_true') c.argument('include_parameter_default_value', action='store_true') c.argument('skip_resource_name_params', action='store_true') c.argument('skip_all_params', action='store_true') c.argument('resource_ids', nargs='+', options_list='--resource-ids') with self.argument_context('group create') as c: c.argument('rg_name', options_list=['--name', '--resource-group', '-n', '-g'], help='name of the new resource group', completer=None, local_context_attribute=LocalContextAttribute( name='resource_group_name', actions=[LocalContextAction.SET], scopes=[ALL])) c.argument('managed_by', min_api='2016-09-01', help='The ID of the resource that manages this resource group.') with self.argument_context('group delete') as c: c.argument('resource_group_name', resource_group_name_type, options_list=['--name', '-n', '--resource-group', '-g'], local_context_attribute=None) c.argument('force_deletion_types', options_list=['--force-deletion-types', '-f'], arg_type=get_enum_type(['Microsoft.Compute/virtualMachines', 'Microsoft.Compute/virtualMachineScaleSets']), min_api='2021-04-01', help='The resource types you want to force delete.') with self.argument_context('tag') as c: c.argument('tag_name', tag_name_type) c.argument('tag_value', tag_value_type) c.argument('resource_id', tag_resource_id_type) c.argument('tags', tags_type) c.argument('operation', arg_type=get_enum_type([item.value for item in list(TagUpdateOperation)]), help='The update operation: options include Merge, Replace and Delete.') with self.argument_context('lock') as c: c.argument('lock_name', options_list=['--name', '-n'], validator=validate_lock_parameters) c.argument('level', arg_type=get_enum_type(LockLevel), options_list=['--lock-type', '-t'], help='The type of lock restriction.') c.argument('parent_resource_path', resource_parent_type) c.argument('resource_provider_namespace', resource_namespace_type) c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list) c.argument('resource_name', options_list=['--resource', '--resource-name'], help='Name or ID of the resource being locked. If an ID is given, other resource arguments should not be given.') c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). 
If provided, no other ""Resource Id"" arguments should be specified.') c.argument('resource_group', resource_group_name_type, validator=validate_lock_parameters) with self.argument_context('resource lock') as c: c.argument('resource_group', resource_group_name_type) c.argument('resource_name', options_list=['--resource', '--resource-name'], help='If an ID is given, other resource arguments should not be given.', validator=validate_resource_lock) with self.argument_context('group lock') as c: c.argument('resource_group', resource_group_name_type, validator=validate_group_lock, id_part=None) with self.argument_context('group lock create') as c: c.argument('resource_group', required=True) with self.argument_context('account lock') as c: c.argument('resource_group', ignore_type, validator=validate_subscription_lock) for scope in ['account', 'group']: with self.argument_context('{} lock'.format(scope)) as c: c.ignore('resource_provider_namespace', 'parent_resource_path', 'resource_type', 'resource_name') for scope in ['lock', 'account lock', 'group lock', 'resource lock']: with self.argument_context(scope) as c: c.argument('lock_name', options_list=['--name', '-n'], help='Name of the lock') c.argument('level', options_list=['--lock-type', '-t'], arg_type=get_enum_type([LockLevel.can_not_delete, LockLevel.read_only]), help='The type of lock restriction.') c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other ""Resource Id"" arguments should be specified.') c.argument('notes', help='Notes about this lock.') with self.argument_context('managedapp') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application', id_part='resource_group') c.argument('application_name', options_list=['--name', '-n'], id_part='name') c.argument('tags', tags_type) with self.argument_context('managedapp definition') as c: c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application definition', id_part='resource_group') c.argument('application_definition_name', options_list=['--name', '-n'], id_part='name') with self.argument_context('managedapp create') as c: c.argument('name', options_list=['--name', '-n'], help='name of the new managed application', completer=None) c.argument('location', help='the managed application location') c.argument('managedapp_definition_id', options_list=['--managedapp-definition-id', '-d'], help='the full qualified managed application definition id') c.argument('managedby_resource_group_id', options_list=['--managed-rg-id', '-m'], help='the resource group managed by the managed application') c.argument('parameters', help='JSON formatted string or a path to a file with such content', type=file_type) for operation in ['create', 'update']: with self.argument_context('managedapp definition {}'.format(operation)) as c: c.argument('lock_level', arg_type=get_enum_type(ApplicationLockLevel), help='The type of lock restriction.') c.argument('authorizations', options_list=['--authorizations', '-a'], nargs='+', help=""space-separated authorization pairs in a format of `:`"") c.argument('create_ui_definition', options_list=['--create-ui-definition', '-c'], help='JSON formatted string or a path to a file with such content', type=file_type) c.argument('main_template', options_list=['--main-template', '-t'], help='JSON formatted string or a path to a file with such content', type=file_type) with 
self.argument_context('account') as c: c.argument('subscription', options_list=['--subscription', '-s'], help='Name or ID of subscription.', completer=get_subscription_id_list) c.ignore('_subscription') # hide global subscription parameter with self.argument_context('account management-group') as c: c.argument('group_name', options_list=['--name', '-n']) c.argument('no_register', action='store_true', help='Skip registration for resource provider Microsoft.Management') with self.argument_context('account management-group show') as c: c.argument('expand', options_list=['--expand', '-e'], action='store_true') c.argument('recurse', options_list=['--recurse', '-r'], action='store_true') with self.argument_context('account management-group create') as c: c.argument('display_name', options_list=['--display-name', '-d']) c.argument('parent', options_list=['--parent', '-p']) with self.argument_context('account management-group update') as c: c.argument('display_name', options_list=['--display-name', '-d']) c.argument('parent_id', options_list=['--parent', '-p']) with self.argument_context('account management-group hierarchy-settings create') as c: c.argument('default_management_group', options_list=['--default-management-group', '-m']) c.argument('require_authorization_for_group_creation', options_list=['--require-authorization-for-group-creation', '-r']) with self.argument_context('account management-group hierarchy-settings update') as c: c.argument('default_management_group', options_list=['--default-management-group', '-m']) c.argument('require_authorization_for_group_creation', options_list=['--require-authorization-for-group-creation', '-r']) with self.argument_context('ts') as c: c.argument('name', options_list=['--name', '-n'], help='The name of the template spec.') c.argument('version', options_list=['--version', '-v'], help='The template spec version.') with self.argument_context('ts create') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.') c.argument('template_file', arg_type=deployment_template_file_type) c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.') c.argument('location', options_list=['--location', '-l'], help='The location to store the template-spec and template-spec version(s). 
Cannot be changed after creation.') c.argument('display_name', arg_type=ts_display_name_type) c.argument('description', arg_type=ts_description_type) c.argument('version_description', arg_type=ts_version_description_type) c.argument('tags', tags_type) c.argument('no_prompt', options_list=['--yes', '-y'], action='store_true', help='Do not prompt for confirmation') with self.argument_context('ts update') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.') c.argument('template_spec', arg_type=deployment_template_spec_type) c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.') c.argument('template_file', arg_type=deployment_template_file_type) c.argument('display_name', arg_type=ts_display_name_type) c.argument('description', arg_type=ts_description_type) c.argument('version_description', arg_type=ts_version_description_type) c.argument('tags', tags_type) with self.argument_context('ts show') as c: c.argument('template_spec', arg_type=deployment_template_spec_type) with self.argument_context('ts export') as c: c.argument('output_folder', options_list=['--output-folder'], help='Existing folder to output export(s).') c.argument('template_spec', arg_type=deployment_template_spec_type) with self.argument_context('ts delete') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group where the template spec or template spec version is stored.') c.argument('template_spec', arg_type=deployment_template_spec_type) with self.argument_context('ts list') as c: c.argument('resource_group', arg_type=resource_group_name_type) with self.argument_context('bicep build') as c: c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep file to build in the file system."")) c.argument('outdir', arg_type=CLIArgumentType(options_list=['--outdir'], completer=DirectoriesCompleter(), help=""When set, saves the output at the specified directory."")) c.argument('outfile', arg_type=CLIArgumentType(options_list=['--outfile'], completer=FilesCompleter(), help=""When set, saves the output as the specified file path."")) c.argument('stdout', arg_type=CLIArgumentType(options_list=['--stdout'], action='store_true', help=""When set, prints all output to stdout instead of corresponding files."")) c.argument('no_restore', arg_type=CLIArgumentType(options_list=['--no-restore'], action='store_true', help=""When set, builds the bicep file without restoring external modules."")) with self.argument_context('bicep decompile') as c: c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the ARM template to decompile in the file system."")) c.argument('force', arg_type=CLIArgumentType(options_list=['--force'], action='store_true', help=""Allows overwriting the output file if it exists."")) with self.argument_context('bicep restore') as c: c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep file to restore external modules for."")) c.argument('force', arg_type=CLIArgumentType(options_list=['--force'], action='store_true', help=""Allows overwriting the cached external modules."")) with self.argument_context('bicep publish') as c: c.argument('file', 
arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep module file to publish in the file system."")) c.argument('target', arg_type=CLIArgumentType(options_list=['--target', '-t'], help=""The target location where the Bicep module will be published."")) with self.argument_context('bicep install') as c: c.argument('version', options_list=['--version', '-v'], help='The version of Bicep CLI to be installed. Default to the latest if not specified.') c.argument('target_platform', arg_type=bicep_target_platform_type) with self.argument_context('bicep upgrade') as c: c.argument('target_platform', arg_type=bicep_target_platform_type) with self.argument_context('bicep generate-params') as c: c.argument('file', options_list=['--file', '-f'], completer=FilesCompleter(), type=file_type, help=""The path to the Bicep file to generate the parameters file from in the file system."") c.argument('outdir', options_list=['--outdir'], completer=DirectoriesCompleter(), help=""When set, saves the output at the specified directory."") c.argument('outfile', options_list=['--outfile'], completer=FilesCompleter(), help=""When set, saves the output as the specified file path."") c.argument('stdout', options_list=['--stdout'], action='store_true', help=""When set, prints all output to stdout instead of corresponding files."") c.argument('no_restore', options_list=['--no-restore'], action='store_true', help=""When set, generates the parameters file without restoring external modules."") with self.argument_context('resourcemanagement private-link create') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') c.argument('name', options_list=['--name', '-n'], help='The name of the resource management private link.') c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group, help='the region to create the resource management private link') with self.argument_context('resourcemanagement private-link show') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') c.argument('name', options_list=['--name', '-n'], help='The name of the resource management private link.') with self.argument_context('resourcemanagement private-link list') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') with self.argument_context('resourcemanagement private-link delete') as c: c.argument('resource_group', arg_type=resource_group_name_type, help='The name of the resource group.') c.argument('name', options_list=['--name', '-n'], help='The name of the resource management private link.') with self.argument_context('private-link association create') as c: c.argument('management_group_id', arg_type=management_group_id_type) c.argument('name', options_list=['--name', '-n'], help='The name of the private link association') c.argument('privatelink', options_list=['--privatelink', '-p'], help='The name of the private link') c.argument('public_network_access', options_list=['--public-network-access', '-a'], arg_type=get_enum_type(['enabled', 'disabled']), help='restrict traffic to private link') with self.argument_context('private-link association show') as c: c.argument('management_group_id', arg_type=management_group_id_type) c.argument('name', options_list=['--name', '-n'], help='The name of the private link association') with 
self.argument_context('private-link association list') as c: c.argument('management_group_id', arg_type=management_group_id_type) with self.argument_context('private-link association delete') as c: c.argument('management_group_id', arg_type=management_group_id_type) c.argument('name', options_list=['--name', '-n'], help='The name of the private link association') " 58353,"def test_get_with_invalid_filters(layout_ds005): l = layout_ds005 # Raise error with suggestions with pytest.raises(ValueError, match='session'): l.get(subject='12', ses=True, invalid_filters='error') with pytest.raises(ValueError, match='session'): l.get(subject='12', ses=True) # Silently drop amazing res_without = l.get(subject='12', suffix='bold') res_drop = l.get(subject='12', suffix='bold', amazing='!!!', invalid_filters='drop') assert res_without == res_drop assert len(res_drop) == 3 # Retain amazing, producing empty set allow_res = l.get(subject='12', amazing=True, invalid_filters='allow') assert allow_res == [] # assert warning when filters are passed in filters = { 'subject': '1', } with pytest.raises(RuntimeError, match='You passed in filters as a dictionary'): l.get(filters=filters) ","def test_get_with_invalid_filters(layout_ds005): l = layout_ds005 # Raise error with suggestions with pytest.raises(ValueError, match='session'): l.get(subject='12', ses=True, invalid_filters='error') with pytest.raises(ValueError, match='session'): l.get(subject='12', ses=True) # Silently drop amazing res_without = l.get(subject='12', suffix='bold') res_drop = l.get(subject='12', suffix='bold', amazing='!!!', invalid_filters='drop') assert res_without == res_drop assert len(res_drop) == 3 # Retain amazing, producing empty set allow_res = l.get(subject='12', amazing=True, invalid_filters='allow') assert allow_res == [] # assert warning when filters are passed in filters = {'subject': '1'} with pytest.raises(RuntimeError, match='You passed in filters as a dictionary'): l.get(filters=filters) # Correct call: l.get(**filters) " 12892,"def validate_storefront_url(url): """"""Validate the storefront URL. Raise ValidationError if URL isn't in RFC 1808 format or it isn't allowed by ALLOWED_STOREFRONT_HOSTS in settings. """""" try: parsed_url = urlparse(url) domain, _ = split_domain_port(parsed_url.netloc) except ValueError as error: raise ValidationError({""redirectUrl"": str(error)}) if not validate_host(domain, settings.ALLOWED_STOREFRONT_HOSTS): raise ValidationError({""redirectUrl"": f""{domain} this is not allowed address.""}) ","def validate_storefront_url(url): """"""Validate the storefront URL. Raise ValidationError if URL isn't in RFC 1808 format or it isn't allowed by ALLOWED_STOREFRONT_HOSTS in settings. """""" try: parsed_url = urlparse(url) domain, _ = split_domain_port(parsed_url.netloc) except ValueError as error: raise ValidationError({""redirectUrl"": str(error)}) if not validate_host(domain, settings.ALLOWED_STOREFRONT_HOSTS): raise ValidationError({""redirectUrl"": f""Domain {domain} is not allowed. 
Please check `ALLOWED_CLIENT_HOSTS` configuration.""}) " 4476,"def get_fill_colors(cols, n_fill): """"""Get the fill colors for the middle of divergent colormaps."""""" steps = np.linalg.norm(np.diff(cols[:, :3].astype(float), axis=0), axis=1) ind = np.flatnonzero(steps[1:-1] > steps[[0, -1]].mean() * 3) if ind.size > 0: # choose the two colors between which there is the large step ind = ind[0] + 1 fillcols = np.r_[np.tile(cols[ind, :], (int(n_fill / 2), 1)), np.tile(cols[ind + 1, :], (n_fill - int(n_fill / 2), 1))] else: # choose a color from the middle of the colormap fillcols = np.tile(cols[int(cols.shape[0] / 2), :], (n_fill, 1)) return fillcols ","def get_fill_colors(cols, n_fill): """"""Get the fill colors for the middle of divergent colormaps."""""" steps = np.linalg.norm(np.diff(cols[:, :3].astype(float), axis=0), axis=1) ind = np.flatnonzero(steps[1:-1] > steps[[0, -1]].mean() * 3) if ind.size > 0: # choose the two colors between which there is the large step ind = ind[0] + 1 fillcols = np.r_[np.tile(cols[ind, :], (n_fill // 2, 1)), np.tile(cols[ind + 1, :], (n_fill - int(n_fill / 2), 1))] else: # choose a color from the middle of the colormap fillcols = np.tile(cols[int(cols.shape[0] / 2), :], (n_fill, 1)) return fillcols " 44486,"def _strip_type_hints_using_lib2to3(source_code: str) -> str: """"""Strips type annotations from the function definitions in the provided source code."""""" # Using the standard lib2to3 library to strip type annotations. # Switch to another library like strip-hints is issues are found from lib2to3 import fixer_base, refactor, fixer_util class StripAnnotations(fixer_base.BaseFix): PATTERN = r''' typed_func_parameter=tname | typed_func_return_value=funcdef< any+ '->' any+ > ''' def transform(self, node, results): if 'typed_func_parameter' in results: # Delete the annotation part of the function parameter declaration del node.children[1:] elif 'typed_func_return_value' in results: # Delete the return annotation part of the function declaration del node.children[-4:-2] return node class Refactor(refactor.RefactoringTool): def __init__(self, fixers): self._fixers = [cls(None, None) for cls in fixers] super().__init__(None, {'print_function': True}) def get_fixers(self): return self._fixers, [] stripped_code = Refactor([StripAnnotations]).refactor_string(source_code, '') return stripped_code ","def _strip_type_hints_using_lib2to3(source_code: str) -> str: """"""Strips type annotations from the function definitions in the provided source code."""""" # Using the standard lib2to3 library to strip type annotations. # Switch to another library like strip-hints if issues are found. 
from lib2to3 import fixer_base, refactor, fixer_util class StripAnnotations(fixer_base.BaseFix): PATTERN = r''' typed_func_parameter=tname | typed_func_return_value=funcdef< any+ '->' any+ > ''' def transform(self, node, results): if 'typed_func_parameter' in results: # Delete the annotation part of the function parameter declaration del node.children[1:] elif 'typed_func_return_value' in results: # Delete the return annotation part of the function declaration del node.children[-4:-2] return node class Refactor(refactor.RefactoringTool): def __init__(self, fixers): self._fixers = [cls(None, None) for cls in fixers] super().__init__(None, {'print_function': True}) def get_fixers(self): return self._fixers, [] stripped_code = Refactor([StripAnnotations]).refactor_string(source_code, '') return stripped_code " 8418,"def linear_exciser(spectrum, region): """""" Basic spectral excise method where the spectral region defined by the parameter ``region`` (a `~specutils.SpectralRegion`) will result in the flux between those regions set to a linear ramp of the two points immediately before and after the start and end of the region. Other methods could be defined by the user to do other types of excision. Parameters ---------- spectrum : `~specutils.Spectrum1D` The `~specutils.Spectrum1D` object to which the excision will be applied. region : `~specutils.SpectralRegion` The region of the spectrum to replace. Returns ------- spectrum : `~specutils.Spectrum1D` Output `~specutils.Spectrum1D` with the region excised. Raises ------ ValueError In the case that ``spectrum`` and ``region`` are not the correct types. """""" wavelengths = spectrum.spectral_axis.copy() flux = spectrum.flux.copy() modified_flux = flux if spectrum.uncertainty is not None: new_uncertainty = spectrum.uncertainty.copy() else: new_uncertainty = None # Need to add a check that the subregions don't overlap, since that could # cause undesired results. For now warn if there is more than one subregion if len(region) > 1: # Raise a warning if the SpectralRegion has more than one subregion, since # the handling for this is perhaps unexpected warnings.warn(""A SpectralRegion with multiple subregions was provided as "" ""input. This may lead to undesired behavior with linear_exciser if "" ""the subregions overlap."", AstropyUserWarning) for subregion in region: # Find the indices of the spectral_axis array corresponding to the subregion wavelengths_in = (wavelengths >= subregion.lower) & (wavelengths < subregion.upper) inclusive_indices = np.nonzero(wavelengths_in)[0] # Now set the flux values for these indices to be a # linear range s, e = max(inclusive_indices[0]-1, 0), min(inclusive_indices[-1]+1, wavelengths.size-1) modified_flux[s:e+1] = np.linspace(flux[s], flux[e], modified_flux[s:e+1].size) # Add the uncertainty of the two linear interpolation endpoints in # quadrature and apply to the excised region. if new_uncertainty is not None: new_uncertainty[s:e] = np.sqrt(spectrum.uncertainty[s]**2 + spectrum.uncertainty[e]**2) # Return a new object with the regions excised. 
return Spectrum1D(flux=modified_flux, spectral_axis=wavelengths, uncertainty=new_uncertainty, wcs=spectrum.wcs, mask = spectrum.mask, velocity_convention=spectrum.velocity_convention, rest_value=spectrum.rest_value, radial_velocity = spectrum.radial_velocity) ","def linear_exciser(spectrum, region): """""" Basic spectral excise method where the spectral region defined by the parameter ``region`` (a `~specutils.SpectralRegion`) will result in the flux between those regions set to a linear ramp of the two points immediately before and after the start and end of the region. Other methods could be defined by the user to do other types of excision. Parameters ---------- spectrum : `~specutils.Spectrum1D` The `~specutils.Spectrum1D` object to which the excision will be applied. region : `~specutils.SpectralRegion` The region of the spectrum to replace. Returns ------- spectrum : `~specutils.Spectrum1D` Output `~specutils.Spectrum1D` with the region excised. Raises ------ ValueError In the case that ``spectrum`` and ``region`` are not the correct types. """""" wavelengths = spectrum.spectral_axis.copy() flux = spectrum.flux.copy() modified_flux = flux if spectrum.uncertainty is not None: new_uncertainty = spectrum.uncertainty.copy() else: new_uncertainty = None # Need to add a check that the subregions don't overlap, since that could # cause undesired results. For now warn if there is more than one subregion if len(region) > 1: # Raise a warning if the SpectralRegion has more than one subregion, since # the handling for this is perhaps unexpected warnings.warn(""A SpectralRegion with multiple subregions was provided as "" ""input. This may lead to undesired behavior with linear_exciser if "" ""the subregions overlap."", AstropyUserWarning) for subregion in region: # Find the indices of the spectral_axis array corresponding to the subregion region_mask = (spectral_axis >= subregion.lower) & (spectral_axis < subregion.upper) inclusive_indices = np.nonzero(wavelengths_in)[0] # Now set the flux values for these indices to be a # linear range s, e = max(inclusive_indices[0]-1, 0), min(inclusive_indices[-1]+1, wavelengths.size-1) modified_flux[s:e+1] = np.linspace(flux[s], flux[e], modified_flux[s:e+1].size) # Add the uncertainty of the two linear interpolation endpoints in # quadrature and apply to the excised region. if new_uncertainty is not None: new_uncertainty[s:e] = np.sqrt(spectrum.uncertainty[s]**2 + spectrum.uncertainty[e]**2) # Return a new object with the regions excised. return Spectrum1D(flux=modified_flux, spectral_axis=wavelengths, uncertainty=new_uncertainty, wcs=spectrum.wcs, mask = spectrum.mask, velocity_convention=spectrum.velocity_convention, rest_value=spectrum.rest_value, radial_velocity = spectrum.radial_velocity) " 26080,"def convert_units(dataset, in_unit, out_unit): """"""Convert units of *dataset*. Convert dataset units for the benefit of writing NinJoTIFF. The main background here is that NinJoTIFF would like brightness temperatures in °C, but satellinge data files are in K. For simplicity of implementation, this function can only convert from °C to K. This function will convert input data from °C to K and write the new unit in the ``""units""`` attribute. When output and input units are equal, it returns the input dataset. Args: dataset (xarray DataArray): Dataarray for which to convert the units. in_unit (str): Unit for input data. out_unit (str): Unit for output data. Returns: dataset, possibly with new units. 
"""""" if in_unit == out_unit: return dataset if in_unit.lower() in {""k"", ""kelvin""} and out_unit.lower() in {""c"", ""celsius""}: new_dataset = dataset + 273.15 new_dataset.attrs[""units""] = out_unit return new_dataset # Other cases not implemented. Creating a quantity from a pint array # doesn't work (TypeError: Quantity cannot wrap upcast type # xarray.DataArray). Working on the values may cause further bugs and # dask-compatibility. I don't know if anyone is using this function to # convert between non-temperature units. raise ValueError( ""NinJoTIFF unit conversion only implemented between K and C, not "" f""between {in_unit!s} and {out_unit!s}"") ","def convert_units(dataset, in_unit, out_unit): """"""Convert units of *dataset*. Convert dataset units for the benefit of writing NinJoTIFF. The main background here is that NinJoTIFF would like brightness temperatures in °C, but satellite data files are in K. For simplicity of implementation, this function can only convert from °C to K. This function will convert input data from °C to K and write the new unit in the ``""units""`` attribute. When output and input units are equal, it returns the input dataset. Args: dataset (xarray DataArray): Dataarray for which to convert the units. in_unit (str): Unit for input data. out_unit (str): Unit for output data. Returns: dataset, possibly with new units. """""" if in_unit == out_unit: return dataset if in_unit.lower() in {""k"", ""kelvin""} and out_unit.lower() in {""c"", ""celsius""}: new_dataset = dataset + 273.15 new_dataset.attrs[""units""] = out_unit return new_dataset # Other cases not implemented. Creating a quantity from a pint array # doesn't work (TypeError: Quantity cannot wrap upcast type # xarray.DataArray). Working on the values may cause further bugs and # dask-compatibility. I don't know if anyone is using this function to # convert between non-temperature units. raise ValueError( ""NinJoTIFF unit conversion only implemented between K and C, not "" f""between {in_unit!s} and {out_unit!s}"") " 49914,"def set_default_subscription_options(settings, user): started_threads = SUBSCRIPTION_CHOICES[settings.subscribe_start] user.subscribe_to_started_threads = started_threads replied_threads = SUBSCRIPTION_CHOICES[settings.subscribe_reply] user.subscribe_to_replied_threads = replied_threads user.save(update_fields=['subscribe_to_replied_threads']) ","def set_default_subscription_options(settings, user): started_threads = SUBSCRIPTION_CHOICES[settings.subscribe_start] user.subscribe_to_started_threads = started_threads replied_threads = SUBSCRIPTION_CHOICES[settings.subscribe_reply] user.subscribe_to_replied_threads = replied_threads user.save(update_fields=[ ""subscribe_to_started_threads"", ""subscribe_to_replied_threads"" ]) " 35866,"def filter_datasets(obs_min = None, obs_max = None, feat_min = None, feat_max = None, class_min = None, class_max = None, endpt = None, max_imbalance = None, task = None): """"""Filters existing datasets by given parameters, and returns a list of their names. 
Parameters ---------- obs_min: int (default: None) The minimum acceptable number of observations/instances in the dataset obs_Max: int (default: None) The maximum acceptable number of observations/instances in the dataset feat_min: int (default: None) The minimum acceptable number of features in the dataset feat_max: int (default: None) The maximum acceptable number of features in the dataset class_min: int (default: None) The minimum acceptable number of classes in the dataset class_max: int (default: None) The maximum acceptable number of classes in the dataset max_imbalance: float (default: None) Maximum acceptable imbalance value for the dataset endpt: str (default: None) Whether the dataset endpoint type should be discrete, continuous, categorical, or binary task: str (default: None) Whether the dataset is suited for classification or regression problems Returns ---------- list (str): list of names of datasets within filters. Will return an empty list if no datasets match. """""" tempdf = pd.read_csv('https://raw.githubusercontent.com/EpistasisLab/penn-ml-benchmarks/master/datasets/all_summary_stats.csv') if obs_min is not None: tempdf = tempdf.loc[tempdf['#instances'] >= obs_min] if obs_max is not None: tempdf = tempdf.loc[tempdf['#instances'] <= obs_max] if feat_Min is not None: tempdf = tempdf.loc[tempdf['#features'] >= feat_min] if feat_Max is not None: tempdf = tempdf.loc[tempdf['#features'] <= feat_max] if class_min is not None: tempdf = tempdf.loc[tempdf['#Classes'] >= class_min] if class_max is not None: tempdf = tempdf.loc[tempdf['#Classes'] <= class_max] if max_imbalance is not None: tempdf = tempdf.loc[tempdf['Imbalance_metric'] < max_imbalance] if endpt is not None: tempdf = tempdf.loc[tempdf['Endpoint_type'] == endpt] if task is not None: tempdf = tempdf.loc[tempdf['problem_type'] == task] return list(tempdf['dataset'].values) ","def select_datasets(obs_min = None, obs_max = None, feat_min = None, feat_max = None, class_min = None, class_max = None, endpt = None, max_imbalance = None, task = None): """"""Filters existing datasets by given parameters, and returns a list of their names. Parameters ---------- obs_min: int (default: None) The minimum acceptable number of observations/instances in the dataset obs_Max: int (default: None) The maximum acceptable number of observations/instances in the dataset feat_min: int (default: None) The minimum acceptable number of features in the dataset feat_max: int (default: None) The maximum acceptable number of features in the dataset class_min: int (default: None) The minimum acceptable number of classes in the dataset class_max: int (default: None) The maximum acceptable number of classes in the dataset max_imbalance: float (default: None) Maximum acceptable imbalance value for the dataset endpt: str (default: None) Whether the dataset endpoint type should be discrete, continuous, categorical, or binary task: str (default: None) Whether the dataset is suited for classification or regression problems Returns ---------- list (str): list of names of datasets within filters. Will return an empty list if no datasets match. 
"""""" tempdf = pd.read_csv('https://raw.githubusercontent.com/EpistasisLab/penn-ml-benchmarks/master/datasets/all_summary_stats.csv') if obs_min is not None: tempdf = tempdf.loc[tempdf['#instances'] >= obs_min] if obs_max is not None: tempdf = tempdf.loc[tempdf['#instances'] <= obs_max] if feat_Min is not None: tempdf = tempdf.loc[tempdf['#features'] >= feat_min] if feat_Max is not None: tempdf = tempdf.loc[tempdf['#features'] <= feat_max] if class_min is not None: tempdf = tempdf.loc[tempdf['#Classes'] >= class_min] if class_max is not None: tempdf = tempdf.loc[tempdf['#Classes'] <= class_max] if max_imbalance is not None: tempdf = tempdf.loc[tempdf['Imbalance_metric'] < max_imbalance] if endpt is not None: tempdf = tempdf.loc[tempdf['Endpoint_type'] == endpt] if task is not None: tempdf = tempdf.loc[tempdf['problem_type'] == task] return list(tempdf['dataset'].values) " 7364,"def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability): """"""Determine number trials such that at least one outlier-free subset is sampled for the given inlier/outlier ratio. Parameters ---------- n_inliers : int Number of inliers in the data. n_samples : int Total number of samples in the data. min_samples : int Minimum number of samples chosen randomly from original data. probability : float Probability (confidence) that one outlier-free sample is generated. Returns ------- trials : int Number of trials. """""" inlier_ratio = n_inliers / float(n_samples) nom = max(_EPSILON, 1 - probability) denom = max(_EPSILON, 1 - inlier_ratio ** min_samples) if nom == 1: return 0 if denom == 1: return float(""inf"") return abs(float(np.ceil(np.log(nom) / np.log(denom)))) ","def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability): """"""Determine number trials such that at least one outlier-free subset is sampled for the given inlier/outlier ratio. Parameters ---------- n_inliers : int Number of inliers in the data. n_samples : int Total number of samples in the data. min_samples : int Minimum number of samples chosen randomly from original data. probability : float Probability (confidence) that one outlier-free sample is generated. Returns ------- trials : int Number of trials. 
"""""" inlier_ratio = n_inliers / float(n_samples) nom = max(_EPSILON, 1 - probability) denom = max(_EPSILON, 1 - inlier_ratio ** min_samples) if nom == 1: return 0 if denom == 1: return np.inf return abs(float(np.ceil(np.log(nom) / np.log(denom)))) " 21194,"def test_undeclared_setuptools_import_on_pex_path(): with temporary_dir() as td: setuptools_pex = os.path.join(td, 'setuptools.pex') run_pex_command(['setuptools==40.6.3', '-o', setuptools_pex]).assert_success() bigquery_pex = os.path.join(td, 'bigquery.pex') run_pex_command(['google-cloud-bigquery==1.10.0', '-o', bigquery_pex]).assert_success() src_dir = os.path.join(td, 'src') os.mkdir(src_dir) src_file = os.path.join(src_dir, 'execute_import.py') with open(src_file, 'w') as fp: fp.write(dedent(""""""\ from google.cloud import bigquery print('bigquery version: {}'.format(bigquery.__version__)) """""")) res = run_pex_command([ '--pex-path={}'.format(':'.join([setuptools_pex, bigquery_pex])), '-D', src_dir, '--entry-point', 'execute_import', ]) res.assert_success() assert res.output == 'bigquery version: 1.10.0\n' ","def test_undeclared_setuptools_import_on_pex_path(): with temporary_dir() as td: setuptools_pex = os.path.join(td, 'setuptools.pex') run_pex_command(['setuptools==40.6.3', '-o', setuptools_pex]).assert_success() bigquery_pex = os.path.join(td, 'bigquery.pex') run_pex_command(['google-cloud-bigquery==1.10.0', '-o', bigquery_pex]).assert_success() src_dir = os.path.join(td, 'src') os.mkdir(src_dir) src_file = os.path.join(src_dir, 'execute_import.py') with open(src_file, 'w') as fp: fp.write(dedent(""""""\ from google.cloud import bigquery print('bigquery version: {}'.format(bigquery.__version__)) """""")) res = run_pex_command([ '--pex-path={}'.format(':'.join([setuptools_pex, bigquery_pex])), '-D', src_dir, '--entry-point', 'execute_import', ]) res.assert_success() assert res.output.strip() == 'bigquery version: 1.10.0' " 48910,"def parse_args(): parser = argparse.ArgumentParser(add_help=True, description='Group Policy Preferences passwords finder and decryptor') parser.add_argument('target', action='store', help='[[domain/]username[:password]@]') parser.add_argument(""-xmlfile"", type=str, required=False, default=None, help=""Group Policy Preferences XML files to parse"") parser.add_argument(""-share"", type=str, required=False, default=""SYSVOL"", help=""SMB Share"") parser.add_argument(""-base-dir"", type=str, required=False, default=""/"", help=""Directory to search in (Default: /)"") parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output') parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON') group = parser.add_argument_group('authentication') group.add_argument('-hashes', action=""store"", metavar=""LMHASH:NTHASH"", help='NTLM hashes, format is LMHASH:NTHASH') group.add_argument('-no-pass', action=""store_true"", help='don\'t ask for password (useful for -k)') group.add_argument('-k', action=""store_true"", help='Use Kerberos authentication. Grabs credentials from ccache file ' '(KRB5CCNAME) based on target parameters. If valid credentials ' 'cannot be found, it will use the ones specified in the command ' 'line') group.add_argument('-aesKey', action=""store"", metavar=""hex key"", help='AES key to use for Kerberos Authentication ' '(128 or 256 bits)') group = parser.add_argument_group('connection') group.add_argument('-dc-ip', action='store', metavar=""ip address"", help='IP Address of the domain controller. 
If omitted it will use the domain part (FQDN) specified in ' 'the target parameter') group.add_argument('-target-ip', action='store', metavar=""ip address"", help='IP Address of the target machine. If omitted it will use whatever was specified as target. ' 'This is useful when target is the NetBIOS name and you cannot resolve it') group.add_argument('-port', choices=['139', '445'], nargs='?', default='445', metavar=""destination port"", help='Destination port to connect to SMB Server') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() ","def parse_args(): parser = argparse.ArgumentParser(add_help=True, description='Group Policy Preferences passwords finder and decryptor') parser.add_argument('target', action='store', help='[[domain/]username[:password]@] or LOCAL' ' (if you want to parse local files)') parser.add_argument(""-xmlfile"", type=str, required=False, default=None, help=""Group Policy Preferences XML files to parse"") parser.add_argument(""-share"", type=str, required=False, default=""SYSVOL"", help=""SMB Share"") parser.add_argument(""-base-dir"", type=str, required=False, default=""/"", help=""Directory to search in (Default: /)"") parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output') parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON') group = parser.add_argument_group('authentication') group.add_argument('-hashes', action=""store"", metavar=""LMHASH:NTHASH"", help='NTLM hashes, format is LMHASH:NTHASH') group.add_argument('-no-pass', action=""store_true"", help='don\'t ask for password (useful for -k)') group.add_argument('-k', action=""store_true"", help='Use Kerberos authentication. Grabs credentials from ccache file ' '(KRB5CCNAME) based on target parameters. If valid credentials ' 'cannot be found, it will use the ones specified in the command ' 'line') group.add_argument('-aesKey', action=""store"", metavar=""hex key"", help='AES key to use for Kerberos Authentication ' '(128 or 256 bits)') group = parser.add_argument_group('connection') group.add_argument('-dc-ip', action='store', metavar=""ip address"", help='IP Address of the domain controller. If omitted it will use the domain part (FQDN) specified in ' 'the target parameter') group.add_argument('-target-ip', action='store', metavar=""ip address"", help='IP Address of the target machine. If omitted it will use whatever was specified as target. 
' 'This is useful when target is the NetBIOS name and you cannot resolve it') group.add_argument('-port', choices=['139', '445'], nargs='?', default='445', metavar=""destination port"", help='Destination port to connect to SMB Server') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() " 31651,"def get_latest_incident_time(incidents): def get_incident_time_datetime(incident): incident_time = incident[""occurred""] incident_time_datetime = occurred_to_datetime(incident_time) return incident_time_datetime latest_incident = max(incidents, key=get_incident_time_datetime) latest_incident_time = latest_incident[""occurred""] return latest_incident_time ","def get_latest_incident_time(incidents): def get_incident_time_datetime(incident): incident_time = incident[""occurred""] incident_time_datetime = occurred_to_datetime(incident_time) return incident_time_datetime latest_incident = max(incidents, key=get_incident_time_datetime) return latest_incident[""occurred""] " 10363,"def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, single_transaction=None, quick=None, ignore_tables=None, hex_blob=None, encoding=None, force=False, create_new=None): cmd = module.get_bin_path('mysqldump', True) # If defined, mysqldump demands --defaults-extra-file be the first option if config_file: cmd += "" --defaults-extra-file=%s"" % shlex_quote(config_file) if user is not None: cmd += "" --user=%s"" % shlex_quote(user) if password is not None: cmd += "" --password=%s"" % shlex_quote(password) if ssl_cert is not None: cmd += "" --ssl-cert=%s"" % shlex_quote(ssl_cert) if ssl_key is not None: cmd += "" --ssl-key=%s"" % shlex_quote(ssl_key) if ssl_ca is not None: cmd += "" --ssl-ca=%s"" % shlex_quote(ssl_ca) if force: cmd += "" --force"" if socket is not None: cmd += "" --socket=%s"" % shlex_quote(socket) else: cmd += "" --host=%s --port=%i"" % (shlex_quote(host), port) if all_databases: cmd += "" --all-databases"" else: cmd += "" --databases {0} --skip-lock-tables"".format(' '.join(db_name)) if (encoding is not None) and (encoding != """"): cmd += "" --default-character-set=%s"" % shlex_quote(encoding) if single_transaction: cmd += "" --single-transaction=true"" if quick: cmd += "" --quick"" if create_new: cmd += "" --no-create-db"" if ignore_tables: for an_ignored_table in ignore_tables: cmd += "" --ignore-table={0}"".format(an_ignored_table) if hex_blob: cmd += "" --hex-blob"" path = None if os.path.splitext(target)[-1] == '.gz': path = module.get_bin_path('gzip', True) elif os.path.splitext(target)[-1] == '.bz2': path = module.get_bin_path('bzip2', True) elif os.path.splitext(target)[-1] == '.xz': path = module.get_bin_path('xz', True) if path: cmd = '%s | %s > %s' % (cmd, path, shlex_quote(target)) else: cmd += "" > %s"" % shlex_quote(target) executed_commands.append(cmd) rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr ","def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, single_transaction=None, quick=None, ignore_tables=None, hex_blob=None, encoding=None, force=False, no_create_db=False): cmd = module.get_bin_path('mysqldump', True) # If defined, mysqldump demands --defaults-extra-file be the first option if config_file: cmd += "" --defaults-extra-file=%s"" % shlex_quote(config_file) if user is not None: cmd += "" --user=%s"" % shlex_quote(user) if 
password is not None: cmd += "" --password=%s"" % shlex_quote(password) if ssl_cert is not None: cmd += "" --ssl-cert=%s"" % shlex_quote(ssl_cert) if ssl_key is not None: cmd += "" --ssl-key=%s"" % shlex_quote(ssl_key) if ssl_ca is not None: cmd += "" --ssl-ca=%s"" % shlex_quote(ssl_ca) if force: cmd += "" --force"" if socket is not None: cmd += "" --socket=%s"" % shlex_quote(socket) else: cmd += "" --host=%s --port=%i"" % (shlex_quote(host), port) if all_databases: cmd += "" --all-databases"" else: cmd += "" --databases {0} --skip-lock-tables"".format(' '.join(db_name)) if (encoding is not None) and (encoding != """"): cmd += "" --default-character-set=%s"" % shlex_quote(encoding) if single_transaction: cmd += "" --single-transaction=true"" if quick: cmd += "" --quick"" if create_new: cmd += "" --no-create-db"" if ignore_tables: for an_ignored_table in ignore_tables: cmd += "" --ignore-table={0}"".format(an_ignored_table) if hex_blob: cmd += "" --hex-blob"" path = None if os.path.splitext(target)[-1] == '.gz': path = module.get_bin_path('gzip', True) elif os.path.splitext(target)[-1] == '.bz2': path = module.get_bin_path('bzip2', True) elif os.path.splitext(target)[-1] == '.xz': path = module.get_bin_path('xz', True) if path: cmd = '%s | %s > %s' % (cmd, path, shlex_quote(target)) else: cmd += "" > %s"" % shlex_quote(target) executed_commands.append(cmd) rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr " 40910,"def pool_ta(tas, col, basename_prefix, out_dir): if len(tas) > 1: if basename_prefix is None: prefix = os.path.join(out_dir,'basename_prefix') else: prefix = os.path.join(out_dir, os.path.basename(strip_ext_ta(tas[0]))) pooled_ta = '{}.pooled.tagAlign.gz'.format(prefix) cmd = 'zcat -f {} | ' if col is not None: cmd += 'cut -f 1-{} | '.format(col) cmd += 'gzip -nc > {}' cmd = cmd.format( ' '.join(tas), pooled_ta) run_shell_cmd(cmd) return pooled_ta else: raise Exception('Needs at least two TAs (or BEDs) to be pooled.') ","def pool_ta(tas, col, basename_prefix, out_dir): if len(tas) > 1: if basename_prefix is None: prefix = os.path.join(out_dir,'basename_prefix') else: prefix = os.path.join(out_dir, os.path.basename(strip_ext_ta(tas[0]))) pooled_ta = '{}.pooled.tagAlign.gz'.format(prefix) cmd = 'zcat -f {} | ' if col is not None: cmd += 'cut -f 1-{} | '.format(col) cmd += 'gzip -nc > {}' cmd = cmd.format( ' '.join(tas), pooled_ta) run_shell_cmd(cmd) return pooled_ta else: raise ValueError('Needs at least two TAs (or BEDs) to be pooled.') " 40117,"def _add_file_object_only_fields(fo, meta): ''' Adds fields relevant for only FileObjects but not FirmwareObjects from fo to meta :param meta: The dictionary to add the fields to :param fo: A FileObject ''' if not isinstance(fo, Firmware): meta['firmwares_including_this_file'] = list(fo.parent_firmware_uids) meta['virtual_file_path'] = fo.get_virtual_paths_for_one_uid() ","def _add_file_object_only_fields(fo, meta): ''' Adds fields relevant for only :class:`objects.file.FileObject` but not Firmware objects from `fo` to meta :param meta: The dictionary to add the fields to :param fo: A FileObject ''' if not isinstance(fo, Firmware): meta['firmwares_including_this_file'] = list(fo.parent_firmware_uids) meta['virtual_file_path'] = fo.get_virtual_paths_for_one_uid() " 42562,"def test_query_token_with_info(rotkehlchen_api_server): """"""Query DAI token to retrieve basic information"""""" response = requests.get( api_url_for( rotkehlchen_api_server, ""erc20tokeninfo"", ), json={ 'address': 
string_to_ethereum_address(""0x6B175474E89094C44Da98b954EedeAC495271d0F""), }, ) assert_proper_response_with_result(response) data = response.json() assert data['result']['decimals'] == 18 assert data['result']['symbol'] == 'DAI' assert data['result']['name'] == 'Dai Stablecoin' ","def test_query_token_with_info(rotkehlchen_api_server): """"""Query DAI token to retrieve basic information"""""" response = requests.get( api_url_for( rotkehlchen_api_server, ""erc20tokeninfo"", ), json={ 'address': string_to_ethereum_address('0x6B175474E89094C44Da98b954EedeAC495271d0F'), }, ) assert_proper_response_with_result(response) data = response.json() assert data['result']['decimals'] == 18 assert data['result']['symbol'] == 'DAI' assert data['result']['name'] == 'Dai Stablecoin' " 36734,"def _get_machine_win32(): # Try to use the PROCESSOR_* environment variables # available on Win XP and later; see # http://support.microsoft.com/kb/888731 and # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM # WOW64 processes mask the native architecture try: arch, = _wmi_query('CPU', 'Architecture') except OSError: pass else: try: arch = ['x86', 'MIPS', 'Alpha', 'PowerPC', None, 'ARM', 'ia64', None, None, 'AMD64', None, None, 'ARM64', ][int(arch)] except (ValueError, IndexError): pass else: if arch: return arch return ( os.environ.get('PROCESSOR_ARCHITEW6432', '') or os.environ.get('PROCESSOR_ARCHITECTURE', '') ) ","def _get_machine_win32(): # Try to use the PROCESSOR_* environment variables # available on Win XP and later; see # http://support.microsoft.com/kb/888731 and # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM # WOW64 processes mask the native architecture try: arch = _wmi_query('CPU', 'Architecture')[0] except OSError: pass else: try: arch = ['x86', 'MIPS', 'Alpha', 'PowerPC', None, 'ARM', 'ia64', None, None, 'AMD64', None, None, 'ARM64', ][int(arch)] except (ValueError, IndexError): pass else: if arch: return arch return ( os.environ.get('PROCESSOR_ARCHITEW6432', '') or os.environ.get('PROCESSOR_ARCHITECTURE', '') ) " 6592,"def setup(company=None, patch=True): uae_custom_fields() add_print_formats() add_permissions() create_ksa_vat_setting(company) make_custom_fields() ","def setup(company=None, patch=True): uae_custom_fields() add_print_formats() add_permissions() create_ksa_vat_setting(company) make_qrcode_field() " 37606,"def call( target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None, name: Optional[str] = None, channels: Optional[List[chans.Channel]] = None, value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None, **kw_params: ParameterValueType, ): """"""Call the subroutine within the currently active builder context with arbitrary parameters which will be assigned to the target program. .. note:: If the ``target`` program is instance of schedule or quantum cirucit, it will be assigned as :class:`~qiskit.pulse.instructions.Call` instruction. Otherwise :class:`~qiskit.pulse.instructions.Reference` instruction is added and ``target`` is separately registered to the references. Examples: 1. Call with substantial program. .. 
code-block:: python from qiskit import circuit, pulse, schedule, transpile from qiskit.test.mock import FakeOpenPulse2Q backend = FakeOpenPulse2Q() qc = circuit.QuantumCircuit(2) qc.cx(0, 1) qc_transpiled = transpile(qc, optimization_level=3) sched = schedule(qc_transpiled, backend) with pulse.build(backend) as pulse_prog: pulse.call(sched) pulse.call(qc) This function can optionally take parameter dictionary with the parameterized target program. .. code-block:: python from qiskit import circuit, pulse amp = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0)) with pulse.build() as main_prog: pulse.call(subroutine, amp=0.1) pulse.call(subroutine, amp=0.3) If there is any parameter name collision, you can distinguish them by specifying each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be updated with the same value. .. code-block:: python from qiskit import circuit, pulse amp1 = circuit.Parameter('amp') amp2 = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0)) pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1)) with pulse.build() as main_prog: pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2}) 2. Call with unassigned program. .. code-block:: python qiskit import pulse with pulse.build() as main_prog: ref_key = ""my_subroutine"" pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)]) with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0)) main_prog.assign_reference(ref_key=ref_key, schedule=subroutine) When you call without actual program, you can assign the program afterwards through the :meth:`ScheduleBlock.assign_reference` method. Args: target: Target circuit or pulse schedule to call. If this program is not provided, both ``name`` and ``channels`` should be provided instead. name: Name of subroutine if defined. channels: Optional. Channels associated to the subroutine. value_dict: Optional. Local scoped parameters assigned to the subroutine. If this dictionary is provided, the ``target`` program is copied and then stored in the main built schedule with having parameters assigned. This dictionary is keyed on the :class:`~.Parameter` object, thus parameter name collision can be avoided. This option is valid only when the subroutine is called with ``target``. kw_params: Alternative way to provide local scoped parameters. Since this is keyed on the string parameter name, the parameters having the same name are all updated together. If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter` object instead. Raises: exceptions.PulseError: If the input ``target`` type is not supported. exceptions.PulseError: Target program is empty and name and channels are not both provided. exceptions.PulseError: Subroutine is called by name and channels but local scoped parameters are also provided. """""" if target is None: if value_dict is not None or any(kw_params): raise exceptions.PulseError( ""Parameters are provided without target program. "" ""These parameters cannot be assigned."" ) if name is None or channels is None: raise exceptions.PulseError( ""Subroutine name and channels are not both provided. 
"" ""Please call subroutine with target program, or both name and channels."" ) _active_builder().append_reference(reference_key=name, channels=channels) else: if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)): raise exceptions.PulseError( f'Target of type ""{target.__class__.__name__}"" is not supported.' ) _active_builder().call_subroutine( subroutine=target, name=name, value_dict=value_dict, **kw_params ) ","def call( target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None, name: Optional[str] = None, channels: Optional[List[chans.Channel]] = None, value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None, **kw_params: ParameterValueType, ): """"""Call the subroutine within the currently active builder context with arbitrary parameters which will be assigned to the target program. .. note:: If the ``target`` program is instance of schedule or quantum cirucit, it will be assigned as :class:`~qiskit.pulse.instructions.Call` instruction. Otherwise :class:`~qiskit.pulse.instructions.Reference` instruction is added and ``target`` is separately registered to the references. Examples: 1. Call with substantial program. .. code-block:: python from qiskit import circuit, pulse, schedule, transpile from qiskit.test.mock import FakeOpenPulse2Q backend = FakeOpenPulse2Q() qc = circuit.QuantumCircuit(2) qc.cx(0, 1) qc_transpiled = transpile(qc, optimization_level=3) sched = schedule(qc_transpiled, backend) with pulse.build(backend) as pulse_prog: pulse.call(sched) pulse.call(qc) This function can optionally take parameter dictionary with the parameterized target program. .. code-block:: python from qiskit import circuit, pulse amp = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0)) with pulse.build() as main_prog: pulse.call(subroutine, amp=0.1) pulse.call(subroutine, amp=0.3) If there is any parameter name collision, you can distinguish them by specifying each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be updated with the same value. .. code-block:: python from qiskit import circuit, pulse amp1 = circuit.Parameter('amp') amp2 = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0)) pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1)) with pulse.build() as main_prog: pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2}) 2. Call with unassigned program. .. code-block:: python qiskit import pulse with pulse.build() as main_prog: ref_key = ""my_subroutine"" pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)]) with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0)) main_prog.assign_reference(ref_key=ref_key, schedule=subroutine) When you call without actual program, you can assign the program afterwards through the :meth:`ScheduleBlock.assign_reference` method. Args: target: Target circuit or pulse schedule to call. If this program is not provided, both ``name`` and ``channels`` should be provided instead. name: Name of subroutine if defined. channels: Optional. Channels associated to the subroutine. value_dict: Optional. Local scoped parameters assigned to the subroutine. If this dictionary is provided, the ``target`` program is copied and then stored in the main built schedule and its parameters are assigned to the given values. 
This dictionary is keyed on the :class:`~.Parameter` object, thus parameter name collision can be avoided. This option is valid only when the subroutine is called with ``target``. kw_params: Alternative way to provide local scoped parameters. Since this is keyed on the string parameter name, the parameters having the same name are all updated together. If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter` object instead. Raises: exceptions.PulseError: If the input ``target`` type is not supported. exceptions.PulseError: Target program is empty and name and channels are not both provided. exceptions.PulseError: Subroutine is called by name and channels but local scoped parameters are also provided. """""" if target is None: if value_dict is not None or any(kw_params): raise exceptions.PulseError( ""Parameters are provided without target program. "" ""These parameters cannot be assigned."" ) if name is None or channels is None: raise exceptions.PulseError( ""Subroutine name and channels are not both provided. "" ""Please call subroutine with target program, or both name and channels."" ) _active_builder().append_reference(reference_key=name, channels=channels) else: if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)): raise exceptions.PulseError( f'Target of type ""{target.__class__.__name__}"" is not supported.' ) _active_builder().call_subroutine( subroutine=target, name=name, value_dict=value_dict, **kw_params ) " 27994,"def overwrite_cppcheck_report_hash(reports, plist_file): """"""CppCheck generates a '0' value for the bug hash. In case all of the reports in a plist file contain only a hash with '0' value oeverwrite the hash values in the plist report files with a context free hash value. """""" rep_hash = [rep.report_hash == '0' for rep in reports] if all(rep_hash): replace_report_hash(plist_file, HashType.CONTEXT_FREE) return True return False ","def overwrite_cppcheck_report_hash(reports, plist_file): """"""CppCheck generates a '0' value for the bug hash. In case all of the reports in a plist file contain only a hash with '0' value overwrite the hash values in the plist report files with a context free hash value. """""" rep_hash = [rep.report_hash == '0' for rep in reports] if all(rep_hash): replace_report_hash(plist_file, HashType.CONTEXT_FREE) return True return False " 56006,"def convert_to_localized_md(model_list, localized_model_list, format_str): """"""Convert `model_list` to each localized README."""""" def _rep(match): title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups() return format_str.format( title=title, model_link=model_link, paper_affiliations=paper_affiliations, paper_title_link=paper_title_link, paper_authors=paper_authors, supplements="" "" + supplements.strip() if len(supplements) != 0 else """", ) # This regex captures metadata from an English model description, including model title, model link, # affiliations of the paper, title of the paper, authors of the paper, and supplemental data (see DistilBERT for example). _re_capture_meta = re.compile( r""\*\*\[([^\]]*)\]\(([^\)]*)\)\*\* \(from ([^)]*)\)[^\[]*([^\)]*\)).*?by (.*?[A-Za-z\*]{2,}?)\. (.*)$"" ) # This regex is used to synchronize link. 
_re_capture_title_link = re.compile(r""\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*"") num_models_equal = True if len(localized_model_list) == 0: localized_model_index = {} else: try: localized_model_index = { re.search(r""\*\*\[([^\]]*)"", line).groups()[0]: line for line in localized_model_list.strip().split(""\n"") } except AttributeError: raise AttributeError(""A model name in localized READMEs cannot be recognized."") for model in model_list.strip().split(""\n""): title, model_link = re.search(_re_capture_title_link, model).groups() if title not in localized_model_index: num_models_equal = False # Add an anchor white space behind a model description string for regex. # If metadata cannot be captured, the English version will be directly copied. localized_model_index[title] = re.sub(_re_capture_meta, _rep, model + "" "") else: # Synchronize link localized_model_index[title] = re.sub( _re_capture_title_link, f""**[{title}]({model_link})**"", localized_model_index[title], count=1 ) sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower()) return num_models_equal, ""\n"".join(map(lambda x: x[1], sorted_index)) + ""\n"" ","def convert_to_localized_md(model_list, localized_model_list, format_str): """"""Convert `model_list` to each localized README."""""" def _rep(match): title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups() return format_str.format( title=title, model_link=model_link, paper_affiliations=paper_affiliations, paper_title_link=paper_title_link, paper_authors=paper_authors, supplements="" "" + supplements.strip() if len(supplements) != 0 else """", ) # This regex captures metadata from an English model description, including model title, model link, # affiliations of the paper, title of the paper, authors of the paper, and supplemental data (see DistilBERT for example). _re_capture_meta = re.compile( r""\*\*\[([^\]]*)\]\(([^\)]*)\)\*\* \(from ([^)]*)\)[^\[]*([^\)]*\)).*?by (.*?[A-Za-z\*]{2,}?)\. (.*)$"" ) # This regex is used to synchronize link. _re_capture_title_link = re.compile(r""\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*"") num_models_equal = True if len(localized_model_list) == 0: localized_model_index = {} else: try: localized_model_index = { re.search(r""\*\*\[([^\]]*)"", line).groups()[0]: line for line in localized_model_list.strip().split(""\n"") } except AttributeError: raise AttributeError(""A model name in localized READMEs cannot be recognized."") for model in model_list.strip().split(""\n""): title, model_link = re.search(_re_capture_title_link, model).groups() if title not in localized_model_index: num_models_equal = False # Add an anchor white space behind a model description string for regex. # If metadata cannot be captured, the English version will be directly copied. localized_model_index[title] = re.sub(_re_capture_meta, _rep, model + "" "") else: # Synchronize link localized_model_index[title] = _re_capture_title_link.sub( f""**[{title}]({model_link})**"", localized_model_index[title], count=1 ) sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower()) return num_models_equal, ""\n"".join(map(lambda x: x[1], sorted_index)) + ""\n"" " 55629,"def solarize(input: torch.Tensor, thresholds: Union[float, torch.Tensor] = 0.5, additions: Optional[Union[float, torch.Tensor]] = None) -> torch.Tensor: r"""""" For each pixel in the image less than threshold, we add 'addition' amount to it and then clip the pixel value to be between 0 and 1.0. The value of 'addition' is between -0.5 and 0.5. 
Args: input (torch.Tensor): image tensor with shapes like (C, H, W) or (B, C, H, W) to solarize. thresholds (float or torch.Tensor): solarize thresholds. If int or one element tensor, input will be solarized across the whole batch. If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input). additions (optional, float or torch.Tensor): between -0.5 and 0.5. Default None. If None, no addition will be performed. If int or one element tensor, same addition will be added across the whole batch. If 1-d tensor, additions will be added element-wisely, len(additions) == len(input). Returns: torch.Tensor: Solarized images. """""" if not torch.is_tensor(input): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not isinstance(thresholds, (float, torch.Tensor,)): raise TypeError(f""The factor should be either a float or torch.Tensor. "" f""Got {type(thresholds)}"") if isinstance(thresholds, float): thresholds = torch.tensor(thresholds) if additions is not None: if not isinstance(additions, (float, torch.Tensor,)): raise TypeError(f""The factor should be either a float or torch.Tensor. "" f""Got {type(additions)}"") if isinstance(additions, float): additions = torch.tensor(additions) assert torch.all((additions < 0.5) * (additions > -0.5)), \ f""The value of 'addition' is between -0.5 and 0.5. Got {additions}."" if isinstance(additions, torch.Tensor) and len(additions.shape) != 0: assert input.size(0) == len(additions) and len(additions.shape) == 1, \ f""additions must be a 1-d vector of shape ({input.size(0)},). Got {additions}"" # TODO: I am not happy about this line, but no easy to do batch-wise operation additions = torch.stack([x.expand(*input.shape[1:]) for x in additions]) additions = additions.to(input.device).to(input.dtype) input = input + additions input = input.clamp(0., 1.) return _solarize(input, thresholds) ","def solarize(input: torch.Tensor, thresholds: Union[float, torch.Tensor] = 0.5, additions: Optional[Union[float, torch.Tensor]] = None) -> torch.Tensor: r"""""" For each pixel in the image less than threshold, we add 'addition' amount to it and then clip the pixel value to be between 0 and 1.0. The value of 'addition' is between -0.5 and 0.5. Args: input (torch.Tensor): image tensor with shapes like (C, H, W) or (B, C, H, W) to solarize. thresholds (float or torch.Tensor): solarize thresholds. If int or one element tensor, input will be solarized across the whole batch. If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input). additions (optional, float or torch.Tensor): between -0.5 and 0.5. Default None. If None, no addition will be performed. If int or one element tensor, same addition will be added across the whole batch. If 1-d tensor, additions will be added element-wisely, len(additions) == len(input). Returns: torch.Tensor: Solarized images. """""" if not torch.is_tensor(input): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not isinstance(thresholds, (float, torch.Tensor,)): raise TypeError(f""The factor should be either a float or torch.Tensor. "" f""Got {type(thresholds)}"") if isinstance(thresholds, float): thresholds = torch.tensor(thresholds) if additions is not None: if not isinstance(additions, (float, torch.Tensor,)): raise TypeError(f""The factor should be either a float or torch.Tensor. 
"" f""Got {type(additions)}"") if isinstance(additions, float): additions = torch.tensor(additions) assert torch.all((additions < 0.5) * (additions > -0.5)), \ f""The value of 'addition' is between -0.5 and 0.5. Got {additions}."" if isinstance(additions, torch.Tensor) and len(additions.shape) != 0: assert input.size(0) == len(additions) and len(additions.shape) == 1, \ f""additions must be a 1-d vector of shape ({input.size(0)},). Got {additions}"" # TODO: I am not happy about this line, but no easy to do batch-wise operation additions = additions.to(input.device).to(input.dtype) additions = torch.stack([x.expand(*input.shape[1:]) for x in additions]) input = input + additions input = input.clamp(0., 1.) return _solarize(input, thresholds) " 42647,"def load_config(): env_config = load_config_from_env() file_config = load_config_from_file() if file_config is not None: logger.info('loading config from file') loglevel = file_config.get('loglevel') logfromothermodules = file_config.get('logfromothermodules') sleep_secs = file_config.get('sleep-secs') max_logfiles_num = file_config.get('max_logfiles_num') max_size_in_mb_all_logs = file_config.get('max_size_in_mb_all_logs') else: logger.info('loading config from env') loglevel = env_config.get('loglevel') logfromothermodules = env_config.get('logfromothermodules') sleep_secs = env_config.get('sleep_secs') max_logfiles_num = env_config.get('max_logfiles_num') max_size_in_mb_all_logs = env_config.get('max_size_in_mb_all_logs') args = [ '--data-dir', '/data', '--logfile', '/logs/rotki.log', '--loglevel', loglevel if loglevel is not None else DEFAULT_LOG_LEVEL ] if logfromothermodules is not None and logfromothermodules is True: args.append('--logfromothermodules') if sleep_secs is not None: args.append('--sleep-secs') args.append(str(sleep_secs)) if max_logfiles_num is not None: args.append('--max-logfiles-num') args.append(str(max_logfiles_num)) if max_size_in_mb_all_logs is not None: args.append('--max-size-in-mb-all-logs') args.append(str(max_size_in_mb_all_logs)) return args ","def load_config(): env_config = load_config_from_env() file_config = load_config_from_file() if file_config is not None: logger.info('loading config from file') loglevel = file_config.get('loglevel') logfromothermodules = file_config.get('logfromothermodules') sleep_secs = file_config.get('sleep-secs') max_logfiles_num = file_config.get('max_logfiles_num') max_size_in_mb_all_logs = file_config.get('max_size_in_mb_all_logs') else: logger.info('loading config from env') loglevel = env_config.get('loglevel') logfromothermodules = env_config.get('logfromothermodules') sleep_secs = env_config.get('sleep_secs') max_logfiles_num = env_config.get('max_logfiles_num') max_size_in_mb_all_logs = env_config.get('max_size_in_mb_all_logs') args = [ '--data-dir', '/data', '--logfile', '/logs/rotki.log', '--loglevel', loglevel if loglevel is not None else DEFAULT_LOG_LEVEL ] if logfromothermodules is True: args.append('--logfromothermodules') if sleep_secs is not None: args.append('--sleep-secs') args.append(str(sleep_secs)) if max_logfiles_num is not None: args.append('--max-logfiles-num') args.append(str(max_logfiles_num)) if max_size_in_mb_all_logs is not None: args.append('--max-size-in-mb-all-logs') args.append(str(max_size_in_mb_all_logs)) return args " 43934,"def _hermite_coulomb(t, u, v, n, p, dr): """"""Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion integrals. 
These integrals are computed recursively starting from the Boys function [`Helgaker (1995) p817 `_]: .. math:: R_{000}^n = (-2p)^n F_n(pR_{CP}^2), where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the center of the composite Gaussian centered at :math:`P` and the electrostatic potential at :math:`C`. The following recursive equations are used to compute the evaluate the higher order Hermite integrals .. math:: R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1} R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1} R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1} where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`. Args: t (integer): order of Hermite derivative in x u (integer): order of Hermite derivative in y v (float): order of Hermite derivative in z n (integer): order of the Boys function p (float): sum of the Gaussian exponents dr (array[float]): distance between the center of the composite Gaussian and the nucleus Returns: array[float]: value of the Hermite integral """""" x, y, z = dr[0], dr[1], dr[2] T = p * (dr ** 2).sum(axis=0) r = 0 if t == u == v == 0: f = [] for term in T.flatten(): f.append(_boys(n, term)) return ((-2 * p) ** n) * anp.array(f).reshape(T.shape) if t == u == 0: if v > 1: r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr) r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr) return r if t == 0: if u > 1: r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr) r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr) return r if t > 1: r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr) r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr) return r ","def _hermite_coulomb(t, u, v, n, p, dr): """"""Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion integrals. These integrals are computed recursively starting from the Boys function [`Helgaker (1995) p817 `_]: .. math:: R_{000}^n = (-2p)^n F_n(pR_{CP}^2), where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the center of the composite Gaussian centered at :math:`P` and the electrostatic potential at :math:`C`. The following recursive equations are used to compute the evaluate the higher order Hermite integrals .. math:: R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1} R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1}, R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1} where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`. 
Args: t (integer): order of Hermite derivative in x u (integer): order of Hermite derivative in y v (float): order of Hermite derivative in z n (integer): order of the Boys function p (float): sum of the Gaussian exponents dr (array[float]): distance between the center of the composite Gaussian and the nucleus Returns: array[float]: value of the Hermite integral """""" x, y, z = dr[0], dr[1], dr[2] T = p * (dr ** 2).sum(axis=0) r = 0 if t == u == v == 0: f = [] for term in T.flatten(): f.append(_boys(n, term)) return ((-2 * p) ** n) * anp.array(f).reshape(T.shape) if t == u == 0: if v > 1: r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr) r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr) return r if t == 0: if u > 1: r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr) r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr) return r if t > 1: r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr) r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr) return r " 29893,"def error_reporter_from_config(raw_config: config.RawConfig, module_name: str) -> raven.Client: """"""Configure and return a error reporter. This expects one configuration option and can take many optional ones: ``sentry.dsn`` The DSN provided by Sentry. If blank, the reporter will discard events. ``sentry.site`` (optional) An arbitrary string to identify this client installation. ``sentry.environment`` (optional) The environment your application is running in. ``sentry.exclude_paths`` (optional) Comma-delimited list of module prefixes to ignore when discovering where an error came from. ``sentry.include_paths`` (optional) Comma-delimited list of paths to include for consideration when drilling down to an exception. ``sentry.ignore_exceptions`` (optional) Comma-delimited list of fully qualified names of exception classes (potentially with * globs) to not report. ``sentry.sample_rate`` (optional) Percentage of errors to report. (e.g. ""37%"") ``sentry.processors`` (optional) Comma-delimited list of fully qualified names of processor classes to apply to events before sending to Sentry. Example usage:: error_reporter_from_config(app_config, __name__) :param raw_config: The application configuration which should have settings for the error reporter. :param module_name: ``__name__`` of the root module of the application. 
"""""" cfg = config.parse_config( raw_config, { ""sentry"": { ""dsn"": config.Optional(config.String, default=None), ""site"": config.Optional(config.String, default=None), ""environment"": config.Optional(config.String, default=None), ""include_paths"": config.Optional(config.String, default=None), ""exclude_paths"": config.Optional(config.String, default=None), ""ignore_exceptions"": config.Optional( config.TupleOf(config.String), default=[] ), # Depricated in favor of `additional_ignore_exception ""additional_ignore_exceptions"": config.Optional( config.TupleOf(config.String), default=[] ), ""sample_rate"": config.Optional(config.Percent, default=1), ""processors"": config.Optional( config.TupleOf(config.String), default=[""raven.processors.SanitizePasswordsProcessor""], ), } }, ) application_module = sys.modules[module_name] module_path = os.path.abspath(application_module.__file__) directory = os.path.dirname(module_path) release = None while directory != ""/"": try: release = raven.fetch_git_sha(directory) except raven.exceptions.InvalidGitRepository: directory = os.path.dirname(directory) else: break cfg_ignore_exceptions = cfg.sentry.ignore_exceptions cfg_additional_ignore_exceptions = cfg.sentry.additional_ignore_exceptions if cfg_additional_ignore_exceptions and cfg_ignore_exceptions: raise config.ConfigurationError( ""sentry.ignore_exceptions"", ""Can not define 'sentry.ignore_exceptions' and 'sentry.additional_ignore_exceptions'"", ) all_ignore_exceptions = cfg_ignore_exceptions or list(ALWAYS_IGNORE_EXCEPTIONS) if cfg_additional_ignore_exceptions: all_ignore_exceptions.extend(cfg_additional_ignore_exceptions) # pylint: disable=maybe-no-member client = raven.Client( dsn=cfg.sentry.dsn, site=cfg.sentry.site, release=release, environment=cfg.sentry.environment, include_paths=cfg.sentry.include_paths, exclude_paths=cfg.sentry.exclude_paths, ignore_exceptions=all_ignore_exceptions, sample_rate=cfg.sentry.sample_rate, processors=cfg.sentry.processors, ) client.ignore_exceptions.add(""ServerTimeout"") return client ","def error_reporter_from_config(raw_config: config.RawConfig, module_name: str) -> raven.Client: """"""Configure and return a error reporter. This expects one configuration option and can take many optional ones: ``sentry.dsn`` The DSN provided by Sentry. If blank, the reporter will discard events. ``sentry.site`` (optional) An arbitrary string to identify this client installation. ``sentry.environment`` (optional) The environment your application is running in. ``sentry.exclude_paths`` (optional) Comma-delimited list of module prefixes to ignore when discovering where an error came from. ``sentry.include_paths`` (optional) Comma-delimited list of paths to include for consideration when drilling down to an exception. ``sentry.ignore_exceptions`` (optional) Comma-delimited list of fully qualified names of exception classes (potentially with * globs) to not report. ``sentry.sample_rate`` (optional) Percentage of errors to report. (e.g. ""37%"") ``sentry.processors`` (optional) Comma-delimited list of fully qualified names of processor classes to apply to events before sending to Sentry. Example usage:: error_reporter_from_config(app_config, __name__) :param raw_config: The application configuration which should have settings for the error reporter. :param module_name: ``__name__`` of the root module of the application. 
"""""" cfg = config.parse_config( raw_config, { ""sentry"": { ""dsn"": config.Optional(config.String, default=None), ""site"": config.Optional(config.String, default=None), ""environment"": config.Optional(config.String, default=None), ""include_paths"": config.Optional(config.String, default=None), ""exclude_paths"": config.Optional(config.String, default=None), ""ignore_exceptions"": config.Optional( config.TupleOf(config.String), default=[] ), # Deprecated in favor of additional_ignore_exception ""additional_ignore_exceptions"": config.Optional( config.TupleOf(config.String), default=[] ), ""sample_rate"": config.Optional(config.Percent, default=1), ""processors"": config.Optional( config.TupleOf(config.String), default=[""raven.processors.SanitizePasswordsProcessor""], ), } }, ) application_module = sys.modules[module_name] module_path = os.path.abspath(application_module.__file__) directory = os.path.dirname(module_path) release = None while directory != ""/"": try: release = raven.fetch_git_sha(directory) except raven.exceptions.InvalidGitRepository: directory = os.path.dirname(directory) else: break cfg_ignore_exceptions = cfg.sentry.ignore_exceptions cfg_additional_ignore_exceptions = cfg.sentry.additional_ignore_exceptions if cfg_additional_ignore_exceptions and cfg_ignore_exceptions: raise config.ConfigurationError( ""sentry.ignore_exceptions"", ""Can not define 'sentry.ignore_exceptions' and 'sentry.additional_ignore_exceptions'"", ) all_ignore_exceptions = cfg_ignore_exceptions or list(ALWAYS_IGNORE_EXCEPTIONS) if cfg_additional_ignore_exceptions: all_ignore_exceptions.extend(cfg_additional_ignore_exceptions) # pylint: disable=maybe-no-member client = raven.Client( dsn=cfg.sentry.dsn, site=cfg.sentry.site, release=release, environment=cfg.sentry.environment, include_paths=cfg.sentry.include_paths, exclude_paths=cfg.sentry.exclude_paths, ignore_exceptions=all_ignore_exceptions, sample_rate=cfg.sentry.sample_rate, processors=cfg.sentry.processors, ) client.ignore_exceptions.add(""ServerTimeout"") return client " 25180,"def infer_typing_namedtuple( node: nodes.Call, context: Optional[InferenceContext] = None ) -> Iterator[nodes.ClassDef]: """"""Infer a typing.NamedTuple(...) call. We do so premature checking of the node to see if we don't run into any unexpected values. """""" try: func = next(node.func.infer()) except (InferenceError, StopIteration) as exc: raise UseInferenceDefault from exc if func.qname() != ""typing.NamedTuple"": raise UseInferenceDefault if len(node.args) != 2: raise UseInferenceDefault if not isinstance(node.args[1], (nodes.List, nodes.Tuple)): raise UseInferenceDefault return infer_named_tuple(node, context) ","def infer_typing_namedtuple( node: nodes.Call, context: Optional[InferenceContext] = None ) -> Iterator[nodes.ClassDef]: """"""Infer a typing.NamedTuple(...) call. 
We do so premature checking of the node to see if we don't run into any unexpected We do some premature checking of the node to see if we don't run into any unexpected """""" try: func = next(node.func.infer()) except (InferenceError, StopIteration) as exc: raise UseInferenceDefault from exc if func.qname() != ""typing.NamedTuple"": raise UseInferenceDefault if len(node.args) != 2: raise UseInferenceDefault if not isinstance(node.args[1], (nodes.List, nodes.Tuple)): raise UseInferenceDefault return infer_named_tuple(node, context) " 556,"def sync_webuser_usercases_if_applicable(user_id, spawn_task): user = CouchUser.get_by_user_id(user_id) for domain in user.get_domains(): domain_obj = Domain.get_by_name(domain) if domain_obj.call_center_config.enabled or domain_obj.usercase_enabled: if spawn_task: sync_webuser_usercases_task.delay(user, domain_obj) else: sync_webuser_usercases_task(user, domain_obj) ","def sync_webuser_usercases_if_applicable(user_id, spawn_task): user = CouchUser.get_by_user_id(user_id) for domain in user.get_domains(): domain_obj = Domain.get_by_name(domain) if domain_obj.usercase_enabled: if spawn_task: sync_webuser_usercases_task.delay(user, domain_obj) else: sync_webuser_usercases_task(user, domain_obj) " 40547,"def load_command_table(self, _): from azext_dnc.generated._client_factory import cf_controller dnc_controller = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._controller_operations#ControllerOperations.{}', client_factory=cf_controller) with self.command_group('dnc controller', dnc_controller, client_factory=cf_controller) as g: g.custom_show_command('show', 'dnc_controller_show') g.custom_command('create', 'dnc_controller_create', supports_no_wait=True) g.custom_command('delete', 'dnc_controller_delete', supports_no_wait=True, confirmation=True) g.custom_wait_command('wait', 'dnc_controller_show') from azext_dnc.generated._client_factory import cf_delegated_network dnc_delegated_network = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._delegated_network_operations#DelegatedNetworkOperation' 's.{}', client_factory=cf_delegated_network) with self.command_group('dnc delegated-network', dnc_delegated_network, client_factory=cf_delegated_network) as g: g.custom_command('list', 'dnc_delegated_network_list') from azext_dnc.generated._client_factory import cf_orchestrator_instance_service dnc_orchestrator_instance_service = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._orchestrator_instance_service_operations#OrchestratorI' 'nstanceServiceOperations.{}', client_factory=cf_orchestrator_instance_service) with self.command_group('dnc orchestrator-instance-service', dnc_orchestrator_instance_service, client_factory=cf_orchestrator_instance_service) as g: g.custom_command('list', 'dnc_orchestrator_instance_service_list') g.custom_show_command('show', 'dnc_orchestrator_instance_service_show') g.custom_command('create', 'dnc_orchestrator_instance_service_create', supports_no_wait=True) g.custom_command('delete', 'dnc_orchestrator_instance_service_delete', supports_no_wait=True, confirmation=True) g.custom_wait_command('wait', 'dnc_orchestrator_instance_service_show') from azext_dnc.generated._client_factory import cf_delegated_subnet_service dnc_delegated_subnet_service = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._delegated_subnet_service_operations#DelegatedSubnetSer' 'viceOperations.{}', client_factory=cf_delegated_subnet_service) with self.command_group('dnc 
delegated-subnet-service', dnc_delegated_subnet_service, client_factory=cf_delegated_subnet_service) as g: g.custom_command('list', 'dnc_delegated_subnet_service_list') g.custom_show_command('show', 'dnc_delegated_subnet_service_show') g.custom_command('create', 'dnc_delegated_subnet_service_create', supports_no_wait=True) g.custom_command('delete', 'dnc_delegated_subnet_service_delete', supports_no_wait=True, confirmation=True) g.custom_wait_command('wait', 'dnc_delegated_subnet_service_show') with self.command_group('dnc', is_experimental=False): pass ","def load_command_table(self, _): from azext_dnc.generated._client_factory import cf_controller dnc_controller = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._controller_operations#ControllerOperations.{}', client_factory=cf_controller) with self.command_group('dnc controller', dnc_controller, client_factory=cf_controller) as g: g.custom_show_command('show', 'dnc_controller_show') g.custom_command('create', 'dnc_controller_create', supports_no_wait=True) g.custom_command('delete', 'dnc_controller_delete', supports_no_wait=True, confirmation=True) g.custom_wait_command('wait', 'dnc_controller_show') from azext_dnc.generated._client_factory import cf_delegated_network dnc_delegated_network = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._delegated_network_operations#DelegatedNetworkOperation' 's.{}', client_factory=cf_delegated_network) with self.command_group('dnc delegated-network', dnc_delegated_network, client_factory=cf_delegated_network) as g: g.custom_command('list', 'dnc_delegated_network_list') from azext_dnc.generated._client_factory import cf_orchestrator_instance_service dnc_orchestrator_instance_service = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._orchestrator_instance_service_operations#OrchestratorI' 'nstanceServiceOperations.{}', client_factory=cf_orchestrator_instance_service) with self.command_group('dnc orchestrator-instance-service', dnc_orchestrator_instance_service, client_factory=cf_orchestrator_instance_service) as g: g.custom_command('list', 'dnc_orchestrator_instance_service_list') g.custom_show_command('show', 'dnc_orchestrator_instance_service_show') g.custom_command('create', 'dnc_orchestrator_instance_service_create', supports_no_wait=True) g.custom_command('delete', 'dnc_orchestrator_instance_service_delete', supports_no_wait=True, confirmation=True) g.custom_wait_command('wait', 'dnc_orchestrator_instance_service_show') from azext_dnc.generated._client_factory import cf_delegated_subnet_service dnc_delegated_subnet_service = CliCommandType( operations_tmpl='azext_dnc.vendored_sdks.dnc.operations._delegated_subnet_service_operations#DelegatedSubnetSer' 'viceOperations.{}', client_factory=cf_delegated_subnet_service) with self.command_group('dnc delegated-subnet-service', dnc_delegated_subnet_service, client_factory=cf_delegated_subnet_service) as g: g.custom_command('list', 'dnc_delegated_subnet_service_list') g.custom_show_command('show', 'dnc_delegated_subnet_service_show') g.custom_command('create', 'dnc_delegated_subnet_service_create', supports_no_wait=True) g.custom_command('delete', 'dnc_delegated_subnet_service_delete', supports_no_wait=True, confirmation=True) g.custom_wait_command('wait', 'dnc_delegated_subnet_service_show') with self.command_group('dnc', is_preview=True): pass " 17707,"def _p(p: str) -> str: """"""A helper to code paths as POSIX paths in tests below. 
Would prepend fake drive C: to absolute paths on Windows"""""" if on_windows: pm = p.replace('/', os.sep) if p.startswith('/'): return ""C:{pm}"" else: return pm return p ","def _p(p: str) -> str: """"""A helper to code paths as POSIX paths in tests below. Would prepend fake drive C: to absolute paths on Windows"""""" if on_windows: pm = p.replace('/', os.sep) if p.startswith('/'): return f""C:{pm}"" else: return pm return p " 3021,"def interpolate_1d_fill( values, method=""pad"", axis=0, limit=None, limit_area=None, fill_value=None, dtype=None, ): """""" This is a 1D-versoin of `interpolate_2d`, which is used for methods `pad` and `backfill` when interpolating. This 1D-version is necessary to be able to handle kwarg `limit_area` via the function ` _derive_indices_of_nans_to_preserve`. It is used the same way as the 1D-interpolation functions which are based on scipy-interpolation, i.e. via np.apply_along_axis. """""" if method == ""pad"": limit_direction = ""forward"" elif method == ""backfill"": limit_direction = ""backward"" else: raise ValueError(""`method` must be either 'pad' or 'backfill'."") orig_values = values yvalues = values invalid = isna(yvalues) valid = ~invalid if values.ndim > 1: raise AssertionError(""This only works with 1D data."") if fill_value is None: mask = None else: # todo create faster fill func without masking mask = mask_missing(values, fill_value) preserve_nans = _derive_indices_of_nans_to_preserve( yvalues=yvalues, valid=valid, invalid=invalid, limit=limit, limit_area=limit_area, limit_direction=limit_direction, ) method = clean_fill_method(method) if method == ""pad"": values = pad_1d(values, limit=limit, mask=mask, dtype=dtype) else: values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype) if orig_values.dtype.kind == ""M"": # convert float back to datetime64 values = values.astype(orig_values.dtype) values[preserve_nans] = fill_value return values ","def interpolate_1d_fill( values, method=""pad"", axis=0, limit: Optional[int] = None, limit_area=None, fill_value=None, dtype=None, ): """""" This is a 1D-versoin of `interpolate_2d`, which is used for methods `pad` and `backfill` when interpolating. This 1D-version is necessary to be able to handle kwarg `limit_area` via the function ` _derive_indices_of_nans_to_preserve`. It is used the same way as the 1D-interpolation functions which are based on scipy-interpolation, i.e. via np.apply_along_axis. 
"""""" if method == ""pad"": limit_direction = ""forward"" elif method == ""backfill"": limit_direction = ""backward"" else: raise ValueError(""`method` must be either 'pad' or 'backfill'."") orig_values = values yvalues = values invalid = isna(yvalues) valid = ~invalid if values.ndim > 1: raise AssertionError(""This only works with 1D data."") if fill_value is None: mask = None else: # todo create faster fill func without masking mask = mask_missing(values, fill_value) preserve_nans = _derive_indices_of_nans_to_preserve( yvalues=yvalues, valid=valid, invalid=invalid, limit=limit, limit_area=limit_area, limit_direction=limit_direction, ) method = clean_fill_method(method) if method == ""pad"": values = pad_1d(values, limit=limit, mask=mask, dtype=dtype) else: values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype) if orig_values.dtype.kind == ""M"": # convert float back to datetime64 values = values.astype(orig_values.dtype) values[preserve_nans] = fill_value return values " 48655,"def needs_wes(test_item: MT) -> MT: """"""Use as a decorator before test classes or methods to run only if WES is available."""""" test_item = _mark_test('wes', test_item) wes_url = os.environ.get('TOIL_WES_ENDPOINT') if not wes_url: return unittest.skip(f""Set TOIL_WES_ENDPOINT to include this test"")(test_item) try: urlopen(f""{wes_url}/ga4gh/wes/v1/service-info"") except (HTTPError, URLError) as e: return unittest.skip(f""Run a WES server on {wes_url} to include this test"")(test_item) return test_item ","def needs_wes_server(test_item: MT) -> MT: """"""Use as a decorator before test classes or methods to run only if a WES server is available to run against."""""" test_item = _mark_test('wes', test_item) wes_url = os.environ.get('TOIL_WES_ENDPOINT') if not wes_url: return unittest.skip(f""Set TOIL_WES_ENDPOINT to include this test"")(test_item) try: urlopen(f""{wes_url}/ga4gh/wes/v1/service-info"") except (HTTPError, URLError) as e: return unittest.skip(f""Run a WES server on {wes_url} to include this test"")(test_item) return test_item " 4396,"def _check_dict_keys(user_dict, valid_keys, dict_name=""Channel name(s)"", valid_name=""info""): """"""Check that the keys in dictionary are valid against a set list. Return the input dictionary if it is valid, otherwise raise a ValueError with a readable error message. Parameters ---------- user_dict : dict The name of the parameter to check. This is used in the error message. valid_keys : list All possible valid key names. Raises ------ ValueError When the key of the dict is not one of the valid options. Returns ------- user_dict When the keys are deemed acceptable the dictionary is returned. """""" sorted_dict = sorted(list(user_dict)) missing = [val not in valid_keys for val in sorted_dict] if any(missing): raise ValueError( f""{dict_name} is missing from {valid_name}: "" f""{np.array(sorted_dict)[np.array(missing)]}"") return user_dict ","def _check_dict_keys(user_dict, valid_keys, dict_name=""Channel name(s)"", valid_name=""info""): """"""Check that the keys in dictionary are valid against a set list. Return the input dictionary if it is valid, otherwise raise a ValueError with a readable error message. Parameters ---------- user_dict : dict The name of the parameter to check. This is used in the error message. valid_keys : list All possible valid key names. Raises ------ ValueError When the key of the dict is not one of the valid options. Returns ------- user_dict When the keys are deemed acceptable the dictionary is returned. 
"""""" sorted_dict = sorted(list(user_dict)) missing = [val for val in sorted_dict if val not in valid_keys] if any(missing): raise ValueError( f""{dict_name} is missing from {valid_name}: "" f""{np.array(sorted_dict)[np.array(missing)]}"") return user_dict " 8941,"def say_section(bot, trigger, server, query, section): page_name = query.replace('_', ' ') query = quote(query.replace(' ', '_')) snippet = mw_section(server, query, section) if not snippet: bot.reply(""Error fetching section \""{}\"" for page \""{}\""."".format(section, page_name)) return msg = '{} - {} | ""{}""'.format(page_name, section.replace('_', ' '), snippet) bot.say(msg, trailing=' [...]""') ","def say_section(bot, trigger, server, query, section): page_name = query.replace('_', ' ') query = quote(query.replace(' ', '_')) snippet = mw_section(server, query, section) if not snippet: bot.reply(""Error fetching section \""{}\"" for page \""{}\""."".format(section, page_name)) return msg = '{} - {} | ""{}""'.format(page_name, section.replace('_', ' '), snippet) bot.say(msg, trailing=' […]""') " 45384,"def show_versions(as_json: str | bool = False) -> None: """""" Provide useful information, important for bug reports. It comprises info about hosting operation system, pandas version, and versions of other installed relative packages. Parameters ---------- as_json : str or bool, default: False * If False, outputs info in a human readable form to the console. * If str, it will be considered as a path to a file. Info will be written to that file in JSON format. * If True, outputs info in JSON format to the console. Notes ----- This is mostly a copy of pandas.show_versions() but adds separate listing of Modin-specific dependencies """""" sys_info = _get_sys_info() modin_deps = _get_modin_deps_info() deps = _get_dependency_info() if as_json: j = { ""system"": sys_info, ""modin dependencies"": modin_deps, ""pandas dependencies"": deps, } if as_json is True: sys.stdout.writelines(json.dumps(j, indent=2)) else: assert isinstance(as_json, str) # needed for mypy with codecs.open(as_json, ""wb"", encoding=""utf8"") as f: json.dump(j, f, indent=2) else: assert isinstance(sys_info[""LOCALE""], dict) # needed for mypy language_code = sys_info[""LOCALE""][""language-code""] encoding = sys_info[""LOCALE""][""encoding""] sys_info[""LOCALE""] = f""{language_code}.{encoding}"" maxlen = max(max(len(x) for x in d) for d in (deps, modin_deps)) print(""\nINSTALLED VERSIONS"") print(""------------------"") for k, v in sys_info.items(): print(f""{k:<{maxlen}}: {v}"") for name, d in ((""Modin"", modin_deps), (""pandas"", deps)): print(f""\n{name} dependencies\n{'-' * (len(name) + 13)}"") for k, v in d.items(): print(f""{k:<{maxlen}}: {v}"") ","def show_versions(as_json: str | bool = False) -> None: """""" Provide useful information, important for bug reports. It comprises info about hosting operation system, pandas version, and versions of other installed relative packages. Parameters ---------- as_json : str or bool, default: False * If False, outputs info in a human readable form to the console. * If str, it will be considered as a path to a file. Info will be written to that file in JSON format. * If True, outputs info in JSON format to the console. Notes ----- This is mostly a copy of pandas.show_versions() but adds separate listing of Modin-specific dependencies. 
"""""" sys_info = _get_sys_info() modin_deps = _get_modin_deps_info() deps = _get_dependency_info() if as_json: j = { ""system"": sys_info, ""modin dependencies"": modin_deps, ""pandas dependencies"": deps, } if as_json is True: sys.stdout.writelines(json.dumps(j, indent=2)) else: assert isinstance(as_json, str) # needed for mypy with codecs.open(as_json, ""wb"", encoding=""utf8"") as f: json.dump(j, f, indent=2) else: assert isinstance(sys_info[""LOCALE""], dict) # needed for mypy language_code = sys_info[""LOCALE""][""language-code""] encoding = sys_info[""LOCALE""][""encoding""] sys_info[""LOCALE""] = f""{language_code}.{encoding}"" maxlen = max(max(len(x) for x in d) for d in (deps, modin_deps)) print(""\nINSTALLED VERSIONS"") print(""------------------"") for k, v in sys_info.items(): print(f""{k:<{maxlen}}: {v}"") for name, d in ((""Modin"", modin_deps), (""pandas"", deps)): print(f""\n{name} dependencies\n{'-' * (len(name) + 13)}"") for k, v in d.items(): print(f""{k:<{maxlen}}: {v}"") " 56751,"def augment_system(ode_func, n_states, n_theta): """""" Function to create augmented system. Take a function which specifies a set of differential equations and return a compiled function which allows for computation of gradients of the differential equation's solition with repsect to the parameters. Uses float64 even if floatX=float32, because the scipy integrator always uses float64. Parameters ---------- ode_func: function Differential equation. Returns array-like. n_states: int Number of rows of the sensitivity matrix. (n_states) n_theta: int Number of ODE parameters Returns ------- system: function Augemted system of differential equations. """""" # Present state of the system t_y = at.vector(""y"", dtype=""float64"") t_y.tag.test_value = np.ones((n_states,), dtype=""float64"") # Parameter(s). Should be vector to allow for generaliztion to multiparameter # systems of ODEs. Is m dimensional because it includes all initial conditions as well as ode parameters t_p = at.vector(""p"", dtype=""float64"") t_p.tag.test_value = np.ones((n_states + n_theta,), dtype=""float64"") # Time. Allow for non-automonous systems of ODEs to be analyzed t_t = at.scalar(""t"", dtype=""float64"") t_t.tag.test_value = 2.459 # Present state of the gradients: # Will always be 0 unless the parameter is the inital condition # Entry i,j is partial of y[i] wrt to p[j] dydp_vec = at.vector(""dydp"", dtype=""float64"") dydp_vec.tag.test_value = make_sens_ic(n_states, n_theta, ""float64"") dydp = dydp_vec.reshape((n_states, n_states + n_theta)) # Get symbolic representation of the ODEs by passing tensors for y, t and theta yhat = ode_func(t_y, t_t, t_p[n_states:]) if isinstance(yhat, at.TensorVariable): if yhat.ndim == 0: # Convert yhat from scalar to array t_yhat = at.stack((yhat,), axis=0) else: t_yhat = yhat elif isinstance(yhat, np.ndarray): msg = f'Invalid Output type for odefunc: {type(yhat)}.\n' msg += 'Valid output types are list, tuple, or at.TensorVariable.' 
raise TypeError(msg) else: # Stack the results of the ode_func into a single tensor variable if not isinstance(yhat, (list, tuple)): yhat = (yhat,) t_yhat = at.stack(yhat, axis=0) # Now compute gradients J = at.jacobian(t_yhat, t_y) Jdfdy = at.dot(J, dydp) grad_f = at.jacobian(t_yhat, t_p) # This is the time derivative of dydp ddt_dydp = (Jdfdy + grad_f).flatten() system = aesara.function( inputs=[t_y, t_t, t_p, dydp_vec], outputs=[t_yhat, ddt_dydp], on_unused_input=""ignore"" ) return system ","def augment_system(ode_func, n_states, n_theta): """""" Function to create augmented system. Take a function which specifies a set of differential equations and return a compiled function which allows for computation of gradients of the differential equation's solition with repsect to the parameters. Uses float64 even if floatX=float32, because the scipy integrator always uses float64. Parameters ---------- ode_func: function Differential equation. Returns array-like. n_states: int Number of rows of the sensitivity matrix. (n_states) n_theta: int Number of ODE parameters Returns ------- system: function Augemted system of differential equations. """""" # Present state of the system t_y = at.vector(""y"", dtype=""float64"") t_y.tag.test_value = np.ones((n_states,), dtype=""float64"") # Parameter(s). Should be vector to allow for generaliztion to multiparameter # systems of ODEs. Is m dimensional because it includes all initial conditions as well as ode parameters t_p = at.vector(""p"", dtype=""float64"") t_p.tag.test_value = np.ones((n_states + n_theta,), dtype=""float64"") # Time. Allow for non-automonous systems of ODEs to be analyzed t_t = at.scalar(""t"", dtype=""float64"") t_t.tag.test_value = 2.459 # Present state of the gradients: # Will always be 0 unless the parameter is the inital condition # Entry i,j is partial of y[i] wrt to p[j] dydp_vec = at.vector(""dydp"", dtype=""float64"") dydp_vec.tag.test_value = make_sens_ic(n_states, n_theta, ""float64"") dydp = dydp_vec.reshape((n_states, n_states + n_theta)) # Get symbolic representation of the ODEs by passing tensors for y, t and theta yhat = ode_func(t_y, t_t, t_p[n_states:]) if isinstance(yhat, at.TensorVariable): if yhat.ndim == 0: # Convert yhat from scalar to array t_yhat = at.stack((yhat,), axis=0) elif yhat.ndim == 2: t_yhat = yhat else: raise ValueError(f""The odefunc returned a {yhat.ndim}-dimensional tensor, but 0 or 1 dimensions were expected."") elif isinstance(yhat, np.ndarray): msg = f'Invalid Output type for odefunc: {type(yhat)}.\n' msg += 'Valid output types are list, tuple, or at.TensorVariable.' raise TypeError(msg) else: # Stack the results of the ode_func into a single tensor variable if not isinstance(yhat, (list, tuple)): yhat = (yhat,) t_yhat = at.stack(yhat, axis=0) # Now compute gradients J = at.jacobian(t_yhat, t_y) Jdfdy = at.dot(J, dydp) grad_f = at.jacobian(t_yhat, t_p) # This is the time derivative of dydp ddt_dydp = (Jdfdy + grad_f).flatten() system = aesara.function( inputs=[t_y, t_t, t_p, dydp_vec], outputs=[t_yhat, ddt_dydp], on_unused_input=""ignore"" ) return system " 44822,"def start_run( run_id: str = None, experiment_id: Optional[str] = None, run_name: Optional[str] = None, nested: bool = False, tags: Optional[Dict[str, Any]] = None, ) -> ActiveRun: """""" Start a new MLflow run, setting it as the active run under which metrics and parameters will be logged. The return value can be used as a context manager within a ``with`` block; otherwise, you must call ``end_run()`` to terminate the current run. 
If you pass a ``run_id`` or the ``MLFLOW_RUN_ID`` environment variable is set, ``start_run`` attempts to resume a run with the specified run ID and other parameters are ignored. ``run_id`` takes precedence over ``MLFLOW_RUN_ID``. If resuming an existing run, the run status is set to ``RunStatus.RUNNING``. MLflow sets a variety of default tags on the run, as defined in :ref:`MLflow system tags `. :param run_id: If specified, get the run with the specified UUID and log parameters and metrics under that run. The run's end time is unset and its status is set to running, but the run's other attributes (``source_version``, ``source_type``, etc.) are not changed. :param experiment_id: ID of the experiment under which to create the current run (applicable only when ``run_id`` is not specified). If ``experiment_id`` argument is unspecified, will look for valid experiment in the following order: activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_NAME`` environment variable, ``MLFLOW_EXPERIMENT_ID`` environment variable, or the default experiment as defined by the tracking server. :param run_name: Name of new run (stored as a ``mlflow.runName`` tag). Used only when ``run_id`` is unspecified. :param nested: Controls whether run is nested in parent run. ``True`` creates a nested run. :param tags: An optional dictionary of string keys and values to set as tags on the run. If a run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are set on the new run. :return: :py:class:`mlflow.ActiveRun` object that acts as a context manager wrapping the run's state. .. code-block:: python :caption: Example import mlflow # Create nested runs with mlflow.start_run(run_name='PARENT_RUN') as parent_run: mlflow.log_param(""parent"", ""yes"") with mlflow.start_run(run_name='CHILD_RUN', nested=True) as child_run: mlflow.log_param(""child"", ""yes"") print(""parent run_id: {}"".format(parent_run.info.run_id)) print(""child run_id : {}"".format(child_run.info.run_id)) print(""--"") # Search all child runs with a parent id query = ""tags.mlflow.parentRunId = '{}'"".format(parent_run.info.run_id) results = mlflow.search_runs(filter_string=query) print(results[[""run_id"", ""params.child"", ""tags.mlflow.runName""]]) .. code-block:: text :caption: Output parent run_id: 5ec0e7ae18f54c2694ffb48c2fccf25c child run_id : 78b3b0d264b44cd29e8dc389749bb4be -- run_id params.child tags.mlflow.runName 0 78b3b0d264b44cd29e8dc389749bb4be yes CHILD_RUN """""" global _active_run_stack # back compat for int experiment_id experiment_id = str(experiment_id) if isinstance(experiment_id, int) else experiment_id if len(_active_run_stack) > 0 and not nested: raise Exception( ( ""Run with UUID {} is already active. To start a new run, first end the "" + ""current run with mlflow.end_run(). 
To start a nested "" + ""run, call start_run with nested=True"" ).format(_active_run_stack[0].info.run_id) ) client = MlflowClient() if run_id: existing_run_id = run_id elif _RUN_ID_ENV_VAR in os.environ: existing_run_id = os.environ[_RUN_ID_ENV_VAR] del os.environ[_RUN_ID_ENV_VAR] else: existing_run_id = None if existing_run_id: _validate_run_id(existing_run_id) active_run_obj = client.get_run(existing_run_id) # Check to see if experiment_id from environment matches experiment_id from set_experiment() if ( _active_experiment_id is not None and _active_experiment_id != active_run_obj.info.experiment_id ): raise MlflowException( ""Cannot start run with ID {} because active run ID "" ""does not match environment run ID. Make sure --experiment-name "" ""or --experiment-id matches experiment set with "" ""set_experiment(), or just use command-line "" ""arguments"".format(existing_run_id) ) # Check to see if current run isn't deleted if active_run_obj.info.lifecycle_stage == LifecycleStage.DELETED: raise MlflowException( ""Cannot start run with ID {} because it is in the "" ""deleted state."".format(existing_run_id) ) # Use previous end_time because a value is required for update_run_info end_time = active_run_obj.info.end_time _get_store().update_run_info( existing_run_id, run_status=RunStatus.RUNNING, end_time=end_time ) if tags: client.log_batch( run_id=existing_run_id, tags=[RunTag(key, str(value)) for key, value in tags.items()], ) active_run_obj = client.get_run(existing_run_id) else: if len(_active_run_stack) > 0: parent_run_id = _active_run_stack[-1].info.run_id else: parent_run_id = None exp_id_for_run = experiment_id if experiment_id is not None else _get_experiment_id() user_specified_tags = deepcopy(tags) or {} if parent_run_id is not None: user_specified_tags[MLFLOW_PARENT_RUN_ID] = parent_run_id if run_name is not None: user_specified_tags[MLFLOW_RUN_NAME] = run_name run_tags = context_registry.resolve_tags(user_specified_tags) active_run_obj = client.create_run(experiment_id=exp_id_for_run, tags=run_tags) _active_run_stack.append(ActiveRun(active_run_obj)) return _active_run_stack[-1] ","def start_run( run_id: str = None, experiment_id: Optional[str] = None, run_name: Optional[str] = None, nested: bool = False, tags: Optional[Dict[str, Any]] = None, ) -> ActiveRun: """""" Start a new MLflow run, setting it as the active run under which metrics and parameters will be logged. The return value can be used as a context manager within a ``with`` block; otherwise, you must call ``end_run()`` to terminate the current run. If you pass a ``run_id`` or the ``MLFLOW_RUN_ID`` environment variable is set, ``start_run`` attempts to resume a run with the specified run ID and other parameters are ignored. ``run_id`` takes precedence over ``MLFLOW_RUN_ID``. If resuming an existing run, the run status is set to ``RunStatus.RUNNING``. MLflow sets a variety of default tags on the run, as defined in :ref:`MLflow system tags `. :param run_id: If specified, get the run with the specified UUID and log parameters and metrics under that run. The run's end time is unset and its status is set to running, but the run's other attributes (``source_version``, ``source_type``, etc.) are not changed. :param experiment_id: ID of the experiment under which to create the current run (applicable only when ``run_id`` is not specified). 
If ``experiment_id`` argument is unspecified, will look for valid experiment in the following order: activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_NAME`` environment variable, ``MLFLOW_EXPERIMENT_ID`` environment variable, or the default experiment as defined by the tracking server. :param run_name: Name of new run (stored as a ``mlflow.runName`` tag). Used only when ``run_id`` is unspecified. :param nested: Controls whether run is nested in parent run. ``True`` creates a nested run. :param tags: An optional dictionary of string keys and values to set as tags on the run. If a run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are set on the new run. :return: :py:class:`mlflow.ActiveRun` object that acts as a context manager wrapping the run's state. .. code-block:: python :caption: Example import mlflow # Create nested runs with mlflow.start_run(run_name='PARENT_RUN') as parent_run: mlflow.log_param(""parent"", ""yes"") with mlflow.start_run(run_name='CHILD_RUN', nested=True) as child_run: mlflow.log_param(""child"", ""yes"") print(""parent run_id: {}"".format(parent_run.info.run_id)) print(""child run_id : {}"".format(child_run.info.run_id)) print(""--"") # Search all child runs with a parent id query = ""tags.mlflow.parentRunId = '{}'"".format(parent_run.info.run_id) results = mlflow.search_runs(filter_string=query) print(results[[""run_id"", ""params.child"", ""tags.mlflow.runName""]]) .. code-block:: text :caption: Output parent run_id: 5ec0e7ae18f54c2694ffb48c2fccf25c child run_id : 78b3b0d264b44cd29e8dc389749bb4be -- run_id params.child tags.mlflow.runName 0 78b3b0d264b44cd29e8dc389749bb4be yes CHILD_RUN """""" global _active_run_stack # back compat for int experiment_id experiment_id = str(experiment_id) if isinstance(experiment_id, int) else experiment_id if len(_active_run_stack) > 0 and not nested: raise Exception( ( ""Run with UUID {} is already active. To start a new run, first end the "" + ""current run with mlflow.end_run(). To start a nested "" + ""run, call start_run with nested=True"" ).format(_active_run_stack[0].info.run_id) ) client = MlflowClient() if run_id: existing_run_id = run_id elif _RUN_ID_ENV_VAR in os.environ: existing_run_id = os.environ[_RUN_ID_ENV_VAR] del os.environ[_RUN_ID_ENV_VAR] else: existing_run_id = None if existing_run_id: _validate_run_id(existing_run_id) active_run_obj = client.get_run(existing_run_id) # Check to see if experiment_id from environment matches experiment_id from set_experiment() if ( _active_experiment_id is not None and _active_experiment_id != active_run_obj.info.experiment_id ): raise MlflowException( ""Cannot start run with ID {} because active run ID "" ""does not match environment run ID. 
Make sure --experiment-name "" ""or --experiment-id matches experiment set with "" ""set_experiment(), or just use command-line "" ""arguments"".format(existing_run_id) ) # Check to see if current run isn't deleted if active_run_obj.info.lifecycle_stage == LifecycleStage.DELETED: raise MlflowException( ""Cannot start run with ID {} because it is in the "" ""deleted state."".format(existing_run_id) ) # Use previous end_time because a value is required for update_run_info end_time = active_run_obj.info.end_time _get_store().update_run_info( existing_run_id, run_status=RunStatus.RUNNING, end_time=end_time ) if tags: client.log_batch( run_id=existing_run_id, tags=[RunTag(key, str(value)) for key, value in tags.items()], ) active_run_obj = client.get_run(existing_run_id) else: if len(_active_run_stack) > 0: parent_run_id = _active_run_stack[-1].info.run_id else: parent_run_id = None exp_id_for_run = experiment_id if experiment_id is not None else _get_experiment_id() user_specified_tags = deepcopy(tags) or {} if parent_run_id is not None: user_specified_tags[MLFLOW_PARENT_RUN_ID] = parent_run_id if run_name is not None: user_specified_tags[MLFLOW_RUN_NAME] = run_name resolved_tags = context_registry.resolve_tags(user_specified_tags) active_run_obj = client.create_run(experiment_id=exp_id_for_run, tags=run_tags) _active_run_stack.append(ActiveRun(active_run_obj)) return _active_run_stack[-1] " 44816,"def load_model(model_uri, artifact_path=None): """""" Load a CatBoost model from a local file or a run. :param model_uri: The location, in URI format, of the MLflow model. For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs://run-relative/path/to/model`` For more information about supported URI schemes, see `Referencing Artifacts `_. :param artifact_path: The local filesystem path to which to download the model artifact. This directory must already exist. If unspecified, a local output path will be created. :return: A CatBoost model (an instance of `CatBoost`_, `CatBoostClassifier`_, or `CatBoostRegressor`_) """""" local_model_path = _download_artifact_from_uri( artifact_uri=model_uri, output_path=artifact_path ) flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) cb_model_file_path = os.path.join( local_model_path, flavor_conf.get(_MODEL_BINARY_KEY, _MODEL_BINARY_FILE_NAME) ) return _load_model( cb_model_file_path, flavor_conf.get(_MODEL_TYPE_KEY), flavor_conf.get(_SAVE_FORMAT_KEY) ) ","def load_model(model_uri, dst_path=None): """""" Load a CatBoost model from a local file or a run. :param model_uri: The location, in URI format, of the MLflow model. For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs://run-relative/path/to/model`` For more information about supported URI schemes, see `Referencing Artifacts `_. :param artifact_path: The local filesystem path to which to download the model artifact. This directory must already exist. If unspecified, a local output path will be created. 
:return: A CatBoost model (an instance of `CatBoost`_, `CatBoostClassifier`_, or `CatBoostRegressor`_) """""" local_model_path = _download_artifact_from_uri( artifact_uri=model_uri, output_path=artifact_path ) flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) cb_model_file_path = os.path.join( local_model_path, flavor_conf.get(_MODEL_BINARY_KEY, _MODEL_BINARY_FILE_NAME) ) return _load_model( cb_model_file_path, flavor_conf.get(_MODEL_TYPE_KEY), flavor_conf.get(_SAVE_FORMAT_KEY) ) " 38274,"def get_status_code_distribution(db_session, endpoint_id): results = db_session.query(Request.status_code, func.count(Request.status_code)).filter( Request.endpoint_id == endpoint_id, Request.status_code.isnot(None)).group_by(Request.status_code).all() total_count = 0 for (_, frequency) in results: total_count += frequency distribution = {} for (status_code, frequency) in results: distribution[status_code] = frequency / total_count return distribution ","def get_status_code_distribution(db_session, endpoint_id): results = db_session.query(Request.status_code, func.count(Request.status_code)).filter( Request.endpoint_id == endpoint_id, Request.status_code.isnot(None)).group_by(Request.status_code).all() total_count = 0 for (_, frequency) in results: total_count += frequency distribution = {status_code:frequency / total_count for (status_code, frequency) in results} for (status_code, frequency) in results: distribution[status_code] = frequency / total_count return distribution " 7074,"def _parse_cli(*ids: str) -> List[Tokens]: """"""Parse a list of Cylc identifiers as provided on the CLI. * Validates identifiers. * Expands relative references to absolute ones. * Handles legacy Cylc7 syntax. Args: *ids (tuple): Identifier list. Raises: ValueError - For invalid identifiers or identifier lists. Returns: list - List of tokens dictionaries. Examples: # parse to tokens then detokenise back >>> from cylc.flow.id import detokenise >>> parse_back = lambda *ids: list(map(detokenise, _parse_cli(*ids))) # list of workflows: >>> parse_back('workworkflow') ['workworkflow'] >>> parse_back('workworkflow/') ['workworkflow'] >>> parse_back('workworkflow1', 'workworkflow2') ['workworkflow1', 'workworkflow2'] # absolute references >>> parse_back('workworkflow1//cycle1', 'workworkflow2//cycle2') ['workworkflow1//cycle1', 'workworkflow2//cycle2'] # relative references: >>> parse_back('workworkflow', '//cycle1', '//cycle2') ['workworkflow//cycle1', 'workworkflow//cycle2'] # mixed references >>> parse_back( ... 'workworkflow1', '//cycle', 'workworkflow2', ... '//cycle', 'workworkflow3//cycle' ... ) ['workworkflow1//cycle', 'workworkflow2//cycle', 'workworkflow3//cycle'] # legacy ids: >>> parse_back('workworkflow', 'task.123', 'a.b.c.234', '345/task') ['workworkflow//123/task', 'workworkflow//234/a.b.c', 'workworkflow//345/task'] # errors: >>> _parse_cli('////') Traceback (most recent call last): UserInputError: Invalid ID: //// >>> parse_back('//cycle') Traceback (most recent call last): UserInputError: Relative reference must follow an incomplete one. E.G: workflow //cycle/task >>> parse_back('workflow//cycle', '//cycle') Traceback (most recent call last): UserInputError: Relative reference must follow an incomplete one. 
E.G: workflow //cycle/task """""" # upgrade legacy ids if required ids = upgrade_legacy_ids(*ids) partials: Optional[Tokens] = None partials_expended: bool = False tokens_list: List[Tokens] = [] for id_ in ids: try: tokens = Tokens(id_) except ValueError: if id_.endswith('/') and not id_.endswith('//'): # noqa: SIM106 # tollerate IDs that end in a single slash on the CLI # (e.g. CLI auto completion) tokens = Tokens(id_[:-1]) else: raise UserInputError(f'Invalid ID: {id_}') is_partial = tokens.get('workflow') and not tokens.get('cycle') is_relative = not tokens.get('workflow') if partials: # we previously encountered a workflow ID which did not specify a # cycle if is_partial: # this is an absolute ID if not partials_expended: # no relative references were made to the previous ID # so add the whole workflow to the tokens list tokens_list.append(partials) partials = tokens partials_expended = False elif is_relative: # this is a relative reference => expand it using the context # of the partial ID tokens_list.append(Tokens( **{ **partials, **tokens, }, )) partials_expended = True else: # this is a fully expanded reference tokens_list.append(tokens) partials = None partials_expended = False else: # there was no previous reference that a relative reference # could apply to if is_partial: partials = tokens partials_expended = False elif is_relative: # so a relative reference is an error raise UserInputError( 'Relative reference must follow an incomplete one.' '\nE.G: workflow //cycle/task' ) else: tokens_list.append(tokens) if partials and not partials_expended: # if the last ID was a ""partial"" but not expanded add it to the list tokens_list.append(tokens) return tokens_list ","def _parse_cli(*ids: str) -> List[Tokens]: """"""Parse a list of Cylc identifiers as provided on the CLI. * Validates identifiers. * Expands relative references to absolute ones. * Handles legacy Cylc7 syntax. Args: *ids (tuple): Identifier list. Raises: ValueError - For invalid identifiers or identifier lists. Returns: list - List of tokens dictionaries. Examples: # parse to tokens then detokenise back >>> from cylc.flow.id import detokenise >>> parse_back = lambda *ids: list(map(detokenise, _parse_cli(*ids))) # list of workflows: >>> parse_back('workworkflow') ['workworkflow'] >>> parse_back('workworkflow/') ['workworkflow'] >>> parse_back('workworkflow1', 'workworkflow2') ['workworkflow1', 'workworkflow2'] # absolute references >>> parse_back('workworkflow1//cycle1', 'workworkflow2//cycle2') ['workworkflow1//cycle1', 'workworkflow2//cycle2'] # relative references: >>> parse_back('workworkflow', '//cycle1', '//cycle2') ['workworkflow//cycle1', 'workworkflow//cycle2'] # mixed references >>> parse_back( ... 'workworkflow1', '//cycle', 'workworkflow2', ... '//cycle', 'workworkflow3//cycle' ... ) ['workworkflow1//cycle', 'workworkflow2//cycle', 'workworkflow3//cycle'] # legacy ids: >>> parse_back('workworkflow', 'task.123', 'a.b.c.234', '345/task') ['workworkflow//123/task', 'workworkflow//234/a.b.c', 'workworkflow//345/task'] # errors: >>> _parse_cli('////') Traceback (most recent call last): UserInputError: Invalid ID: //// >>> parse_back('//cycle') Traceback (most recent call last): UserInputError: Relative reference must follow an incomplete one. E.G: workflow //cycle/task >>> parse_back('workflow//cycle', '//cycle') Traceback (most recent call last): UserInputError: Relative reference must follow an incomplete one. 
E.G: workflow //cycle/task """""" # upgrade legacy ids if required ids = upgrade_legacy_ids(*ids) partials: Optional[Tokens] = None partials_expended: bool = False tokens_list: List[Tokens] = [] for id_ in ids: try: tokens = Tokens(id_) except ValueError: if id_.endswith('/') and not id_.endswith('//'): # noqa: SIM106 # tolerate IDs that end in a single slash on the CLI # (e.g. CLI auto completion) tokens = Tokens(id_[:-1]) else: raise UserInputError(f'Invalid ID: {id_}') is_partial = tokens.get('workflow') and not tokens.get('cycle') is_relative = not tokens.get('workflow') if partials: # we previously encountered a workflow ID which did not specify a # cycle if is_partial: # this is an absolute ID if not partials_expended: # no relative references were made to the previous ID # so add the whole workflow to the tokens list tokens_list.append(partials) partials = tokens partials_expended = False elif is_relative: # this is a relative reference => expand it using the context # of the partial ID tokens_list.append(Tokens( **{ **partials, **tokens, }, )) partials_expended = True else: # this is a fully expanded reference tokens_list.append(tokens) partials = None partials_expended = False else: # there was no previous reference that a relative reference # could apply to if is_partial: partials = tokens partials_expended = False elif is_relative: # so a relative reference is an error raise UserInputError( 'Relative reference must follow an incomplete one.' '\nE.G: workflow //cycle/task' ) else: tokens_list.append(tokens) if partials and not partials_expended: # if the last ID was a ""partial"" but not expanded add it to the list tokens_list.append(tokens) return tokens_list " 50487,"def _report_lines_with_errors( lines_with_errors: List[_LineWithError], context: _Context ) -> None: """"""Log warnings and potentially re-throw exceptions"""""" if not lines_with_errors: return lines = [line for line, _ in lines_with_errors] errors = [error for _, error in lines_with_errors] lines_output = ""\n\t "".join(lines) logger.warning( f""Unrecognized GCOV output for {context.filename}\n"" f""\t {lines_output}\n"" f""\tThis is indicative of a gcov output parse error.\n"" f""\tPlease report this to the gcovr developers\n"" f""\tat ."" ) for ex in errors: logger.warning( ""Exception during parsing:\n\t{type}: {msg}"", type=type(ex).__name__, msg=ex ) if context.flags & ParserFlags.IGNORE_PARSE_ERRORS: return logger.error( ""Exiting because of parse errors.\n"" ""\tYou can run gcovr with --gcov-ignore-parse-errors\n"" ""\tto continue anyway."" ) # if we caught an exception, re-raise it for the traceback raise errors[0] # guaranteed to have at least one exception ","def _report_lines_with_errors( lines_with_errors: List[_LineWithError], context: _Context ) -> None: """"""Log warnings and potentially re-throw exceptions"""""" if not lines_with_errors: return lines = [line for line, _ in lines_with_errors] errors = [error for _, error in lines_with_errors] logger.warning( f""Unrecognized GCOV output for {context.filename}\n"" ""\t {'\n\t '.join(lines)\n"" ""\tThis is indicative of a gcov output parse error.\n"" ""\tPlease report this to the gcovr developers\n"" ""\tat ."" ) for ex in errors: logger.warning( ""Exception during parsing:\n\t{type}: {msg}"", type=type(ex).__name__, msg=ex ) if context.flags & ParserFlags.IGNORE_PARSE_ERRORS: return logger.error( ""Exiting because of parse errors.\n"" ""\tYou can run gcovr with --gcov-ignore-parse-errors\n"" ""\tto continue anyway."" ) # if we caught an exception, 
re-raise it for the traceback raise errors[0] # guaranteed to have at least one exception " 14307,"def programToCommands(program, getNumRegions=None): """"""Takes a T2CharString program list and returns list of commands. Each command is a two-tuple of commandname,arg-list. The commandname might be empty string if no commandname shall be emitted (used for glyph width, hintmask/cntrmask argument, as well as stray arguments at the end of the program (¯\_(ツ)_/¯). 'getNumRegions' may be None, or a callable object. It must return the number of regions. 'getNumRegions' takes a single argument, vsindex. If the vsindex argument is None, getNumRegions returns the default number of regions for the charstring, else it returns the numRegions for the vsindex. The Charstring may or may not start with a width value. If the first non-blend operator has an odd number of arguments, then the first argument is a width, and is popped off. This is complicated with blend operators, as there may be more than one before the first hint or moveto operator, and each one reduces several arguments to just one list argument. We have to sum the number of arguments that are not part of the blend arguments, and all the 'numBlends' values. We could instead have said that by definition, if there is a blend operator, there is no width value, since CFF2 Charstrings don't have width values. I discussed this with Behdad, and we are allowing for an initial width value in this case because developers may assemble a CFF2 charstring from CFF Charstrings, which coudl have width values. """""" width = None seenWidthOp = False vsIndex = None lenBlendStack = 0 lastBlendIndex = 0 commands = [] stack = [] it = iter(program) for token in it: if not isinstance(token, basestring): stack.append(token) continue if token == 'blend': assert getNumRegions is not None numSourceFonts = 1 + getNumRegions(vsIndex) # replace the blend op args on the stack with a single list # containing all the blend op args. numBlends = stack[-1] numBlendArgs = numBlends * numSourceFonts + 1 # replace first blend op by a list of the blend ops. stack[-numBlendArgs:] = [stack[-numBlendArgs:]] lenBlendStack += numBlends + len(stack) - 1 lastBlendIndex = len(stack) # if a blend op exists, this is or will be a CFF2 charstring. continue elif token == 'vsindex': vsIndex = stack[-1] assert type(vsIndex) is int elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm', 'cntrmask', 'hintmask', 'hmoveto', 'vmoveto', 'rmoveto', 'endchar'}: seenWidthOp = True parity = token in {'hmoveto', 'vmoveto'} if lenBlendStack: # lenBlendStack has the number of args represented by the last blend # arg and all the preceding args. We need to now add the number of # args following the last blend arg. numArgs = lenBlendStack + len(stack[lastBlendIndex:]) else: numArgs = len(stack) if numArgs and (numArgs % 2) ^ parity: width = stack.pop(0) commands.append(('', [width])) if token in {'hintmask', 'cntrmask'}: if stack: commands.append(('', stack)) commands.append((token, [])) commands.append(('', [next(it)])) else: commands.append((token, stack)) stack = [] if stack: commands.append(('', stack)) return commands ","def programToCommands(program, getNumRegions=None): """"""Takes a T2CharString program list and returns list of commands. Each command is a two-tuple of commandname,arg-list. The commandname might be empty string if no commandname shall be emitted (used for glyph width, hintmask/cntrmask argument, as well as stray arguments at the end of the program (¯\_(ツ)_/¯). 
'getNumRegions' may be None, or a callable object. It must return the number of regions. 'getNumRegions' takes a single argument, vsindex. If the vsindex argument is None, getNumRegions returns the default number of regions for the charstring, else it returns the numRegions for the vsindex. The Charstring may or may not start with a width value. If the first non-blend operator has an odd number of arguments, then the first argument is a width, and is popped off. This is complicated with blend operators, as there may be more than one before the first hint or moveto operator, and each one reduces several arguments to just one list argument. We have to sum the number of arguments that are not part of the blend arguments, and all the 'numBlends' values. We could instead have said that by definition, if there is a blend operator, there is no width value, since CFF2 Charstrings don't have width values. I discussed this with Behdad, and we are allowing for an initial width value in this case because developers may assemble a CFF2 charstring from CFF Charstrings, which could have width values. """""" width = None seenWidthOp = False vsIndex = None lenBlendStack = 0 lastBlendIndex = 0 commands = [] stack = [] it = iter(program) for token in it: if not isinstance(token, basestring): stack.append(token) continue if token == 'blend': assert getNumRegions is not None numSourceFonts = 1 + getNumRegions(vsIndex) # replace the blend op args on the stack with a single list # containing all the blend op args. numBlends = stack[-1] numBlendArgs = numBlends * numSourceFonts + 1 # replace first blend op by a list of the blend ops. stack[-numBlendArgs:] = [stack[-numBlendArgs:]] lenBlendStack += numBlends + len(stack) - 1 lastBlendIndex = len(stack) # if a blend op exists, this is or will be a CFF2 charstring. continue elif token == 'vsindex': vsIndex = stack[-1] assert type(vsIndex) is int elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm', 'cntrmask', 'hintmask', 'hmoveto', 'vmoveto', 'rmoveto', 'endchar'}: seenWidthOp = True parity = token in {'hmoveto', 'vmoveto'} if lenBlendStack: # lenBlendStack has the number of args represented by the last blend # arg and all the preceding args. We need to now add the number of # args following the last blend arg. 
numArgs = lenBlendStack + len(stack[lastBlendIndex:]) else: numArgs = len(stack) if numArgs and (numArgs % 2) ^ parity: width = stack.pop(0) commands.append(('', [width])) if token in {'hintmask', 'cntrmask'}: if stack: commands.append(('', stack)) commands.append((token, [])) commands.append(('', [next(it)])) else: commands.append((token, stack)) stack = [] if stack: commands.append(('', stack)) return commands " 31542,"def collect_campaign_recipients(): try: selected_ids = demisto.args()['new'] if not selected_ids: return '' incidents = get_campaign_incidents() if ALL_INCIDENTS not in selected_ids: incidents = filter(lambda incident: incident['id'] in selected_ids, incidents) recipient_set = {recipient for incident in incidents for recipient in incident['recipients']} return ','.join(recipient_set) except KeyError as e: raise Exception(f'Missing required arg: {str(e)}') ","def collect_campaign_recipients(): try: selected_ids = demisto.args()['new'] if not selected_ids: return '' incidents = get_campaign_incidents() if ALL_INCIDENTS not in selected_ids: incidents = filter(lambda incident: incident['id'] in selected_ids, incidents) recipient_set = {recipient for incident in incidents for recipient in incident['recipients']} return ','.join(recipient_set) except KeyError as e: raise DemistoException(f'Missing required arg: {str(e)}') from e " 41164,"def _with_parameterized_layers( circuit: 'cirq.Circuit', qubits: Sequence['cirq.Qid'], no_initialization: bool, ) -> 'cirq.Circuit': """"""Return a copy of the input circuit with a parameterized single-qubit rotations. These rotations flank the circuit: the initial two layers of X and Y gates are given parameter names ""{qubit}-Xi"" and ""{qubit}-Yi"" and are used to set up the initial state. If `no_initialization` is set to True, these two layers of gates are omitted. The final two layers of X and Y gates are given parameter names ""{qubit}-Xf"" and ""{qubit}-Yf"" and are use to change the frame of the qubit before measurement, effectively measuring in bases other than Z. """""" x_beg_mom = ops.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xi') for q in qubits]) y_beg_mom = ops.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yi') for q in qubits]) x_end_mom = ops.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xf') for q in qubits]) y_end_mom = ops.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yf') for q in qubits]) meas_mom = ops.Moment([ops.measure(*qubits, key='z')]) if no_initialization: total_circuit = circuit.copy() else: total_circuit = circuits.Circuit([x_beg_mom, y_beg_mom]) total_circuit += circuit.copy() total_circuit.append([x_end_mom, y_end_mom, meas_mom]) return total_circuit ","def _with_parameterized_layers( circuit: 'cirq.Circuit', qubits: Sequence['cirq.Qid'], no_initialization: bool, ) -> 'cirq.Circuit': """"""Return a copy of the input circuit with parameterized single-qubit rotations. These rotations flank the circuit: the initial two layers of X and Y gates are given parameter names ""{qubit}-Xi"" and ""{qubit}-Yi"" and are used to set up the initial state. If `no_initialization` is set to True, these two layers of gates are omitted. The final two layers of X and Y gates are given parameter names ""{qubit}-Xf"" and ""{qubit}-Yf"" and are use to change the frame of the qubit before measurement, effectively measuring in bases other than Z. 
"""""" x_beg_mom = ops.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xi') for q in qubits]) y_beg_mom = ops.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yi') for q in qubits]) x_end_mom = ops.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xf') for q in qubits]) y_end_mom = ops.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yf') for q in qubits]) meas_mom = ops.Moment([ops.measure(*qubits, key='z')]) if no_initialization: total_circuit = circuit.copy() else: total_circuit = circuits.Circuit([x_beg_mom, y_beg_mom]) total_circuit += circuit.copy() total_circuit.append([x_end_mom, y_end_mom, meas_mom]) return total_circuit " 34536,"def create_interpreter( obj: Union[ ""rasa.shared.nlu.interpreter.NaturalLanguageInterpreter"", EndpointConfig, Text, None, ] ) -> ""rasa.shared.nlu.interpreter.NaturalLanguageInterpreter"": """"""Factory to create an natural language interpreter."""""" if isinstance(obj, rasa.shared.nlu.interpreter.NaturalLanguageInterpreter): return obj elif isinstance(obj, str) and os.path.exists(obj): return RasaNLUInterpreter(model_directory=obj) elif isinstance(obj, str) and not os.path.exists(obj): # user passed in a string, but file does not exist logger.warning( f""No local NLU model '{obj}' found. Using RegexInterpreter instead."" ) return rasa.shared.nlu.interpreter.RegexInterpreter() else: return _create_from_endpoint_config(obj) ","def create_interpreter( obj: Union[ ""rasa.shared.nlu.interpreter.NaturalLanguageInterpreter"", EndpointConfig, Text, None, ] ) -> ""rasa.shared.nlu.interpreter.NaturalLanguageInterpreter"": """"""Factory to create an natural language interpreter."""""" if isinstance(obj, rasa.shared.nlu.interpreter.NaturalLanguageInterpreter): return obj elif isinstance(obj, str) and os.path.exists(obj): return RasaNLUInterpreter(model_directory=obj) elif isinstance(obj, str): # user passed in a string, but file does not exist logger.warning( f""No local NLU model '{obj}' found. Using RegexInterpreter instead."" ) return rasa.shared.nlu.interpreter.RegexInterpreter() else: return _create_from_endpoint_config(obj) " 5378,"def test_present(): """""" Test to ensure a job is present in the beacon. """""" beacon_name = ""ps"" ret = {""name"": beacon_name, ""changes"": {}, ""result"": False, ""comment"": """"} mock_mod = MagicMock(return_value=ret) mock_lst = MagicMock(side_effect=[{beacon_name: {}}, {beacon_name: {}}, {}, {}]) with patch.dict( beacon.__salt__, { ""beacons.list"": mock_lst, ""beacons.modify"": mock_mod, ""beacons.add"": mock_mod, }, ): assert beacon.present(beacon_name) == ret with patch.dict(beacon.__opts__, {""test"": False}): assert beacon.present(beacon_name) == ret assert beacon.present(beacon_name) == ret with patch.dict(beacon.__opts__, {""test"": True}): ret.update({""result"": True}) assert beacon.present(beacon_name) == ret ","def test_present(): """""" Test to ensure a job is present in the beacon. 
"""""" beacon_name = ""ps"" ret = {""name"": beacon_name, ""changes"": {}, ""result"": False, ""comment"": """"} mock_mod = MagicMock(return_value=ret) mock_lst = MagicMock(side_effect=[{beacon_name: {}}, {beacon_name: {}}, {}, {}]) with patch.dict( beacon.__salt__, { ""beacons.list"": mock_lst, ""beacons.modify"": mock_mod, ""beacons.add"": mock_mod, }, ): assert beacon.present(beacon_name) == ret with patch.dict(beacon.__opts__, {""test"": False}): assert beacon.present(beacon_name) == ret with patch.dict(beacon.__opts__, {""test"": True}): ret.update({""result"": True}) assert beacon.present(beacon_name) == ret " 49884,"def get_sky_diffuse(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, model='isotropic', model_perez='allsitescomposite1990'): r"""""" Determine in-plane sky diffuse irradiance component using the specified sky diffuse irradiance model. Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- poa_sky_diffuse : numeric Sky diffuse irradiance in the plane of array. [W/m2] Raises ------ ValueError If model is one of 'haydavies', 'reindl', or 'perez' and dni_extra is None. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. 
"""""" model = model.lower() if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None): raise ValueError(f'dni_extra is required for model {model}') if model == 'isotropic': sky = isotropic(surface_tilt, dhi) elif model == 'klucher': sky = klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth) elif model == 'haydavies': sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth) elif model == 'reindl': sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra, solar_zenith, solar_azimuth) elif model == 'king': sky = king(surface_tilt, dhi, ghi, solar_zenith) elif model == 'perez': if airmass is None: airmass = atmosphere.get_relative_airmass(solar_zenith) sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, airmass, model=model_perez) else: raise ValueError(f'invalid model selection {model}') return sky ","def get_sky_diffuse(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, model='isotropic', model_perez='allsitescomposite1990'): r"""""" Determine in-plane sky diffuse irradiance component using the specified sky diffuse irradiance model. Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] model : String, default 'isotropic' Irradiance model. Can be one of ``'isotropic'``, ``'klucher'``, ``'haydavies'``, ``'reindl'``, ``'king'``, ``'perez'``. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- poa_sky_diffuse : numeric Sky diffuse irradiance in the plane of array. [W/m2] Raises ------ ValueError If model is one of 'haydavies', 'reindl', or 'perez' and dni_extra is None. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. 
"""""" model = model.lower() if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None): raise ValueError(f'dni_extra is required for model {model}') if model == 'isotropic': sky = isotropic(surface_tilt, dhi) elif model == 'klucher': sky = klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth) elif model == 'haydavies': sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth) elif model == 'reindl': sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra, solar_zenith, solar_azimuth) elif model == 'king': sky = king(surface_tilt, dhi, ghi, solar_zenith) elif model == 'perez': if airmass is None: airmass = atmosphere.get_relative_airmass(solar_zenith) sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, airmass, model=model_perez) else: raise ValueError(f'invalid model selection {model}') return sky " 30828,"def get_security_profiles_command(): """""" Get information about profiles. """""" security_profile = demisto.args().get('security_profile') if security_profile: xpath = f'{XPATH_RULEBASE}profiles/{security_profile}' else: xpath = f'{XPATH_RULEBASE}profiles' result = get_security_profile(xpath) if security_profile: security_profiles = result.get('response', {}).get('result', {}) else: security_profiles = result.get('response', {}).get('result', {}).get('profiles', {}) if '@dirtyId' in security_profiles: LOG(f'Found uncommitted item:\n{security_profiles}') raise Exception('Please commit the instance prior to getting the security profiles.') human_readable = '' content: List[Dict[str, Any]] = [] context = {} if 'spyware' in security_profiles: profiles = security_profiles.get('spyware').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': spyware_rules }) else: rules = profiles.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': spyware_rules } human_readable = tableToMarkdown('Anti Spyware Profiles', content) context.update({""Panorama.Spyware(val.Name == obj.Name)"": content}) if 'virus' in security_profiles: profiles = security_profiles.get('virus').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Decoder': antivirus_rules }) else: rules = profiles.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': antivirus_rules } human_readable += tableToMarkdown('Antivirus Profiles', content) context.update({""Panorama.Antivirus(val.Name == obj.Name)"": content}) if 'file-blocking' in security_profiles: profiles = security_profiles.get('file-blocking').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': file_blocking_rules }) else: rules = profiles.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': file_blocking_rules } human_readable += tableToMarkdown('File Blocking Profiles', content) 
context.update({""Panorama.FileBlocking(val.Name == obj.Name)"": content}) if 'vulnerability' in security_profiles: profiles = security_profiles.get('vulnerability').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': vulnerability_rules }) else: rules = profiles.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': vulnerability_rules } human_readable += tableToMarkdown('vulnerability Protection Profiles', content) context.update({""Panorama.Vulnerability(val.Name == obj.Name)"": content}) if 'data-filtering' in security_profiles: profiles = security_profiles.get('data-filtering').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': data_filtering_rules }) else: rules = profiles.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content = { 'Name': profiles['@name'], 'Rules': data_filtering_rules } human_readable += tableToMarkdown('Data Filtering Profiles', content) context.update({""Panorama.DataFiltering(val.Name == obj.Name)"": content}) if 'url-filtering' in security_profiles: profiles = security_profiles.get('url-filtering').get('entry', []) if isinstance(profiles, list): for profile in profiles: url_filtering_rules = prettify_get_url_filter(profile) content.append({ 'Name': profile['@name'], 'Rules': url_filtering_rules }) else: url_filtering_rules = prettify_get_url_filter(profiles) content = { 'Name': profiles['@name'], 'Rules': url_filtering_rules } human_readable += tableToMarkdown('URL Filtering Profiles', content) context.update({""Panorama.URLFilter(val.Name == obj.Name)"": content}) if 'wildfire-analysis' in security_profiles: profiles = security_profiles.get('wildfire-analysis').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': wildfire_rules }) else: rules = profiles.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content = { 'Name': profiles['@name'], 'Rules': wildfire_rules } human_readable += tableToMarkdown('WildFire Profiles', content) context.update({""Panorama.WildFire(val.Name == obj.Name)"": content}) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': context }) ","def get_security_profiles_command(): """""" Get information about profiles. 
"""""" security_profile = demisto.args().get('security_profile') if security_profile: xpath = f'{XPATH_RULEBASE}profiles/{security_profile}' else: xpath = f'{XPATH_RULEBASE}profiles' result = get_security_profile(xpath) if security_profile: security_profiles = result.get('response', {}).get('result', {}) else: security_profiles = result.get('response', {}).get('result', {}).get('profiles', {}) if '@dirtyId' in security_profiles: LOG(f'Found uncommitted item:\n{security_profiles}') raise Exception('Please commit the instance prior to getting the security profiles.') human_readable = '' content: List[Dict[str, Any]] = [] context = {} if 'spyware' in security_profiles: profiles = security_profiles.get('spyware').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': spyware_rules }) else: rules = profiles.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': spyware_rules } human_readable = tableToMarkdown('Anti Spyware Profiles', content) context.update({""Panorama.Spyware(val.Name == obj.Name)"": content}) if 'virus' in security_profiles: profiles = security_profiles.get('virus').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Decoder': antivirus_rules }) else: rules = profiles.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': antivirus_rules } human_readable += tableToMarkdown('Antivirus Profiles', content) context.update({""Panorama.Antivirus(val.Name == obj.Name)"": content}) if 'file-blocking' in security_profiles: profiles = security_profiles.get('file-blocking', {}).get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': file_blocking_rules }) else: rules = profiles.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': file_blocking_rules } human_readable += tableToMarkdown('File Blocking Profiles', content) context.update({""Panorama.FileBlocking(val.Name == obj.Name)"": content}) if 'vulnerability' in security_profiles: profiles = security_profiles.get('vulnerability').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': vulnerability_rules }) else: rules = profiles.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': vulnerability_rules } human_readable += tableToMarkdown('vulnerability Protection Profiles', content) context.update({""Panorama.Vulnerability(val.Name == obj.Name)"": content}) if 'data-filtering' in security_profiles: profiles = security_profiles.get('data-filtering').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content.append({ 'Name': 
profile['@name'], 'Rules': data_filtering_rules }) else: rules = profiles.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content = { 'Name': profiles['@name'], 'Rules': data_filtering_rules } human_readable += tableToMarkdown('Data Filtering Profiles', content) context.update({""Panorama.DataFiltering(val.Name == obj.Name)"": content}) if 'url-filtering' in security_profiles: profiles = security_profiles.get('url-filtering').get('entry', []) if isinstance(profiles, list): for profile in profiles: url_filtering_rules = prettify_get_url_filter(profile) content.append({ 'Name': profile['@name'], 'Rules': url_filtering_rules }) else: url_filtering_rules = prettify_get_url_filter(profiles) content = { 'Name': profiles['@name'], 'Rules': url_filtering_rules } human_readable += tableToMarkdown('URL Filtering Profiles', content) context.update({""Panorama.URLFilter(val.Name == obj.Name)"": content}) if 'wildfire-analysis' in security_profiles: profiles = security_profiles.get('wildfire-analysis').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': wildfire_rules }) else: rules = profiles.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content = { 'Name': profiles['@name'], 'Rules': wildfire_rules } human_readable += tableToMarkdown('WildFire Profiles', content) context.update({""Panorama.WildFire(val.Name == obj.Name)"": content}) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': context }) " 37234,"def _ginibre_matrix(nrow, ncol, seed): """"""Return a normally distributed complex random matrix. Args: nrow (int): number of rows in output matrix. ncol (int): number of columns in output matrix. seed(int or default_rng): default_rng for rng. Returns: ndarray: A complex rectangular matrix where each real and imaginary entry is sampled from the normal distribution. """""" if seed is None: rng = np.random.default_rng() elif isinstance(seed, np.random.Generator): rng = seed else: rng = default_rng(seed) ginibre = rng.normal( size=(nrow, ncol)) + rng.normal(size=(nrow, ncol)) * 1j return ginibre ","def _ginibre_matrix(nrow, ncol, seed): """"""Return a normally distributed complex random matrix. Args: nrow (int): number of rows in output matrix. ncol (int): number of columns in output matrix. seed(int or np.random.Generator): default_rng for rng. Returns: ndarray: A complex rectangular matrix where each real and imaginary entry is sampled from the normal distribution. """""" if seed is None: rng = np.random.default_rng() elif isinstance(seed, np.random.Generator): rng = seed else: rng = default_rng(seed) ginibre = rng.normal( size=(nrow, ncol)) + rng.normal(size=(nrow, ncol)) * 1j return ginibre " 4223,"def annotate_muscle(raw, threshold=1.5, picks=None, min_length_good=.1): """"""Detect segments with muscle artifacts. Detects segments periods that contains high frequency activity beyond the specified threshold. Muscle artifacts are most notable in the range of 110- 140Hz. Raw data is band pass filtered between 110 and 140 Hz, the signal envelope computed, z-scored across samples, channel averaged and low-pass filtered to smooth transient peaks. 
Parameters ---------- raw : instance of Raw Data to compute head position. threshold : float The threshod for selecting segments with muscle activity artifacts. picks : array Channels to use for artifact detection. min_length_good : int | float | None The minimal good segment length between annotations, smaller segments will be included in the movement annotation. Returns ------- annot : mne.Annotations Periods with muscle artifacts. scores_muscle : array Z-score values averaged accros channels for each sample. """""" raw_copy = raw.copy() raw_copy.pick(picks) raw_copy.pick_types(ref_meg=False) # Remove ref chans just in case # Only one type of channel, otherwise z-score will be biased assert(len(set(raw_copy.get_channel_types())) == 1), 'Different channel ' \ 'types, pick one type' raw_copy.filter(110, 140, fir_design='firwin') raw_copy.apply_hilbert(envelope=True) sfreq = raw_copy.info['sfreq'] art_scores = zscore(raw_copy._data, axis=1) scores_muscle = filter_data(art_scores.mean(axis=0), sfreq, None, 4) art_mask = scores_muscle > threshold # remove artifact free periods shorter than min_length_good idx_min = min_length_good * sfreq comps, num_comps = label(art_mask == 0) for l in range(1, num_comps + 1): l_idx = np.nonzero(comps == l)[0] if len(l_idx) < idx_min: art_mask[l_idx] = True annot = _annotations_from_mask(raw_copy.times, art_mask, 'BAD_muscle') return annot, scores_muscle ","def annotate_muscle(raw, threshold=1.5, picks=None, min_length_good=.1): """"""Detect segments with muscle artifacts. Detects segments periods that contains high frequency activity beyond the specified threshold. Muscle artifacts are most notable in the range of 110- 140Hz. Raw data is band-pass filtered between 110 and 140 Hz, the signal envelope computed, z-scored across samples, channel averaged and low-pass filtered to smooth transient peaks. Parameters ---------- raw : instance of Raw Data to compute head position. threshold : float The threshod for selecting segments with muscle activity artifacts. picks : array Channels to use for artifact detection. min_length_good : int | float | None The minimal good segment length between annotations, smaller segments will be included in the movement annotation. Returns ------- annot : mne.Annotations Periods with muscle artifacts. scores_muscle : array Z-score values averaged accros channels for each sample. 
"""""" raw_copy = raw.copy() raw_copy.pick(picks) raw_copy.pick_types(ref_meg=False) # Remove ref chans just in case # Only one type of channel, otherwise z-score will be biased assert(len(set(raw_copy.get_channel_types())) == 1), 'Different channel ' \ 'types, pick one type' raw_copy.filter(110, 140, fir_design='firwin') raw_copy.apply_hilbert(envelope=True) sfreq = raw_copy.info['sfreq'] art_scores = zscore(raw_copy._data, axis=1) scores_muscle = filter_data(art_scores.mean(axis=0), sfreq, None, 4) art_mask = scores_muscle > threshold # remove artifact free periods shorter than min_length_good idx_min = min_length_good * sfreq comps, num_comps = label(art_mask == 0) for l in range(1, num_comps + 1): l_idx = np.nonzero(comps == l)[0] if len(l_idx) < idx_min: art_mask[l_idx] = True annot = _annotations_from_mask(raw_copy.times, art_mask, 'BAD_muscle') return annot, scores_muscle " 7685,"def upgrade(): op.create_table( 'receipt_templates', sa.Column('id', sa.Integer(), nullable=False, primary_key=True), sa.Column('title', sa.String(), nullable=False), sa.Column('event_id', sa.Integer(), nullable=True, index=True), sa.Column('category_id', sa.Integer(), nullable=True, index=True), sa.Column('html', sa.String(), nullable=False), sa.Column('css', sa.String(), nullable=False), sa.Column('custom_fields', postgresql.JSONB(astext_type=sa.Text()), nullable=False), sa.CheckConstraint( '(event_id IS NULL) != (category_id IS NULL)', name=op.f('ck_receipt_templates_event_xor_category_id_null') ), sa.ForeignKeyConstraint(['category_id'], ['categories.categories.id']), sa.ForeignKeyConstraint(['event_id'], ['events.events.id']), schema='indico', ) ","def upgrade(): op.create_table( 'receipt_templates', sa.Column('id', sa.Integer(), nullable=False, primary_key=True), sa.Column('title', sa.String(), nullable=False), sa.Column('event_id', sa.Integer(), nullable=True, index=True), sa.Column('category_id', sa.Integer(), nullable=True, index=True), sa.Column('html', sa.String(), nullable=False), sa.Column('css', sa.String(), nullable=False), sa.Column('custom_fields', postgresql.JSONB(astext_type=sa.Text()), nullable=False), sa.CheckConstraint( '(event_id IS NULL) != (category_id IS NULL)', name='event_xor_category_id_null' ), sa.ForeignKeyConstraint(['category_id'], ['categories.categories.id']), sa.ForeignKeyConstraint(['event_id'], ['events.events.id']), schema='indico', ) " 11801,"def difference(image1, image2): """""" Returns the absolute value of the pixel-by-pixel difference between the two images. At least one of the images must be ""1"" mode. .. code-block:: python out = abs(image1 - image2) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_difference(image2.im)) ","def difference(image1, image2): """""" Returns the absolute value of the pixel-by-pixel difference between the two images. At least one of the images must have mode ""1"". .. code-block:: python out = abs(image1 - image2) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_difference(image2.im)) " 31477,"def get_access_token(refresh_token: str): """""" Gets access token from os environment if it was saved there. Else, generates access token from refresh token. 
Args: refresh_token(str): refresh token to obtain access token, to send mails using gmail API Returns: access_token(str): access token is used to send mails using gmail API """""" access_token = os.getenv('ACCESS_TOKEN') valid_until = int(os.getenv('VALID_UNTIL')) if os.getenv('VALID_UNTIL') else None # check if access token is valid if access_token and valid_until: if int(datetime.now().timestamp()) < valid_until: return access_token if not refresh_token: print(f""Error obtaining access token. Failed sending mails."") sys.exit(1) # else access token should be obtained from refresh token http_client = httplib2.Http() body = { 'refresh_token': refresh_token, 'client_id': GMAIL_CLIENT_ID, 'grant_type': 'refresh_token', } resp, content = http_client.request(TOKEN_URL, ""POST"", urllib.parse.urlencode(body), TOKEN_FORM_HEADERS) if resp.status not in [200, 201]: print(f""Error obtaining access token. Failed sending mails."") sys.exit(1) parsed_response = json.loads(content) access_token = parsed_response.get('access_token') expires_in = parsed_response.get('expires_in', 3595) time_now = int(datetime.now().timestamp()) time_buffer = 5 # seconds by which to shorten the validity period if expires_in - time_buffer > 0: # err on the side of caution with a slightly shorter access token validity period expires_in = expires_in - time_buffer # set environment variables os.environ['ACCESS_TOKEN'] = access_token os.environ['VALID_UNTIL'] = str(time_now + expires_in) return access_token ","def get_access_token(refresh_token: str): """""" Gets access token from os environment if it was saved there. Else, generates access token from refresh token. Args: refresh_token(str): refresh token to obtain access token, to send mails using gmail API Returns: access_token(str): access token is used to send mails using gmail API """""" access_token = os.getenv('ACCESS_TOKEN') valid_until = int(os.getenv('VALID_UNTIL')) if os.getenv('VALID_UNTIL') else None # check if access token is valid if access_token and valid_until: if int(datetime.now().timestamp()) < valid_until: return access_token if not refresh_token: print(f""Error obtaining access token. Failed sending mails."") sys.exit(1) # else access token should be obtained from refresh token http_client = httplib2.Http() body = { 'refresh_token': refresh_token, 'client_id': GMAIL_CLIENT_ID, 'grant_type': 'refresh_token', } resp, content = http_client.request(TOKEN_URL, ""POST"", urllib.parse.urlencode(body), TOKEN_FORM_HEADERS) if resp.status not in [200, 201]: print(f""Failed to send emails: error obtaining access token\n{content}."") sys.exit(1) parsed_response = json.loads(content) access_token = parsed_response.get('access_token') expires_in = parsed_response.get('expires_in', 3595) time_now = int(datetime.now().timestamp()) time_buffer = 5 # seconds by which to shorten the validity period if expires_in - time_buffer > 0: # err on the side of caution with a slightly shorter access token validity period expires_in = expires_in - time_buffer # set environment variables os.environ['ACCESS_TOKEN'] = access_token os.environ['VALID_UNTIL'] = str(time_now + expires_in) return access_token " 39752,"def numpy_img2d_color_mean(img, seg): """""" compute color means by numpy :param ndarray img: input RGB image :param ndarray seg: segmentation og the image :return: np.array matrix features per segment .. 
seealso:: :func:`imsegm.descriptors.cython_img2d_color_mean` >>> image = np.zeros((2, 10, 3)) >>> image[:, 2:6, 0] = 1 >>> image[:, 3:8, 1] = 3 >>> image[:, 4:9, 2] = 2 >>> segm = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], ... [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]) >>> numpy_img2d_color_mean(image, segm) array([[ 0.6, 1.2, 0.4], [ 0.2, 1.8, 1.6]]) """""" logging.debug( 'computing Colour mean for image %r & segm %r with' ' %i segments', img.shape, seg.shape, np.max(seg) ) _check_color_image_segm(img, seg) nb_labels = np.max(seg) + 1 means = np.zeros((nb_labels, 3)) counts = np.zeros(nb_labels) for i in range(seg.shape[0]): for j in range(seg.shape[1]): lb = seg[i, j] means[lb, :] += img[i, j, :] counts[lb] += 1 # prevent dividing by 0 counts[counts == 0] = -1 means = (means / np.tile(counts, (3, 1)).T.astype(float)) # preventing negative zeros # means[means == 0] = 0 return means ","def numpy_img2d_color_mean(img, seg): """""" compute color means by numpy :param ndarray img: input RGB image :param ndarray seg: segmentation og the image :return: np.array matrix features per segment .. seealso:: :func:`imsegm.descriptors.cython_img2d_color_mean` >>> image = np.zeros((2, 10, 3)) >>> image[:, 2:6, 0] = 1 >>> image[:, 3:8, 1] = 3 >>> image[:, 4:9, 2] = 2 >>> segm = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], ... [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]) >>> numpy_img2d_color_mean(image, segm) array([[ 0.6, 1.2, 0.4], [ 0.2, 1.8, 1.6]]) """""" logging.debug( 'computing Colour mean for image %r & segm %r with %i segments', img.shape, seg.shape, np.max(seg) ) _check_color_image_segm(img, seg) nb_labels = np.max(seg) + 1 means = np.zeros((nb_labels, 3)) counts = np.zeros(nb_labels) for i in range(seg.shape[0]): for j in range(seg.shape[1]): lb = seg[i, j] means[lb, :] += img[i, j, :] counts[lb] += 1 # prevent dividing by 0 counts[counts == 0] = -1 means = (means / np.tile(counts, (3, 1)).T.astype(float)) # preventing negative zeros # means[means == 0] = 0 return means " 8564,"def validate_boot_remote_file(value: str) -> bool: """""" This validates if the passed value is a valid value for ``remote_boot_{kernel,initrd}``. :param value: Must be valid URI starting with http or tftp. ftp is not supported and thus invalid. :return: False in any case. If value is valid, ``True`` is returned. """""" if not isinstance(value, str): return False parsed_url = urlparse(value) # Check that it starts with http / tftp if parsed_url.scheme not in (""http"", ""tftp""): return False # Check the port # FIXME: Allow ports behind the hostname and check if they are allowed # Check we have magic @@server@@ if parsed_url.netloc.startswith(""@@"") and parsed_url.netloc.endswith(""server@@""): return True # If not magic @@server@@ then assume IPv4/v6 if netaddr.valid_ipv4(parsed_url.netloc) or netaddr.valid_ipv6(parsed_url.netloc): return True # If not magic or IPv4/v6 then it must be a valid hostname # To check that we remove the protocol and get then everything to the first slash host = value[7:].split(""/"", 1)[0] if RE_URL.match(host): return True return False ","def validate_boot_remote_file(value: str) -> bool: """""" This validates if the passed value is a valid value for ``remote_boot_{kernel,initrd}``. :param value: Must be a valid URI starting with http or tftp. ftp is not supported and thus invalid. :return: False in any case. If value is valid, ``True`` is returned. 
"""""" if not isinstance(value, str): return False parsed_url = urlparse(value) # Check that it starts with http / tftp if parsed_url.scheme not in (""http"", ""tftp""): return False # Check the port # FIXME: Allow ports behind the hostname and check if they are allowed # Check we have magic @@server@@ if parsed_url.netloc.startswith(""@@"") and parsed_url.netloc.endswith(""server@@""): return True # If not magic @@server@@ then assume IPv4/v6 if netaddr.valid_ipv4(parsed_url.netloc) or netaddr.valid_ipv6(parsed_url.netloc): return True # If not magic or IPv4/v6 then it must be a valid hostname # To check that we remove the protocol and get then everything to the first slash host = value[7:].split(""/"", 1)[0] if RE_URL.match(host): return True return False " 20555,"def main(argv=None): parser = get_parser() arguments = parser.parse_args(argv) verbose = arguments.v set_loglevel(verbose=verbose) fname_in = os.path.abspath(arguments.i) fname_seg = os.path.abspath(arguments.s) contrast = arguments.c path_template = os.path.abspath(arguments.t) scale_dist = arguments.scale_dist path_output = os.path.abspath(arguments.ofolder) fname_disc = arguments.discfile if fname_disc is not None: fname_disc = os.path.abspath(fname_disc) initz = arguments.initz initcenter = arguments.initcenter fname_initlabel = arguments.initlabel if fname_initlabel is not None: fname_initlabel = os.path.abspath(fname_initlabel) remove_temp_files = arguments.r clean_labels = arguments.clean_labels path_tmp = tmp_create(basename=""label_vertebrae"") # Copying input data to tmp folder printv('\nCopying input data to tmp folder...', verbose) Image(fname_in).save(os.path.join(path_tmp, ""data.nii"")) Image(fname_seg).save(os.path.join(path_tmp, ""segmentation.nii"")) # Go go temp folder curdir = os.getcwd() os.chdir(path_tmp) # Straighten spinal cord printv('\nStraighten spinal cord...', verbose) # check if warp_curve2straight and warp_straight2curve already exist (i.e. no need to do it another time) cache_sig = cache_signature( input_files=[fname_in, fname_seg], ) fname_cache = ""straightening.cache"" if (cache_valid(os.path.join(curdir, fname_cache), cache_sig) and os.path.isfile(os.path.join(curdir, ""warp_curve2straight.nii.gz"")) and os.path.isfile(os.path.join(curdir, ""warp_straight2curve.nii.gz"")) and os.path.isfile(os.path.join(curdir, ""straight_ref.nii.gz""))): # if they exist, copy them into current folder printv('Reusing existing warping field which seems to be valid', verbose, 'warning') copy(os.path.join(curdir, ""warp_curve2straight.nii.gz""), 'warp_curve2straight.nii.gz') copy(os.path.join(curdir, ""warp_straight2curve.nii.gz""), 'warp_straight2curve.nii.gz') copy(os.path.join(curdir, ""straight_ref.nii.gz""), 'straight_ref.nii.gz') # apply straightening s, o = run_proc(['sct_apply_transfo', '-i', 'data.nii', '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'data_straight.nii']) else: sct_straighten_spinalcord.main(argv=[ '-i', 'data.nii', '-s', 'segmentation.nii', '-r', str(remove_temp_files), '-v', '0', ]) cache_save(os.path.join(path_output, fname_cache), cache_sig) # resample to 0.5mm isotropic to match template resolution printv('\nResample to 0.5mm isotropic...', verbose) s, o = run_proc(['sct_resample', '-i', 'data_straight.nii', '-mm', '0.5x0.5x0.5', '-x', 'linear', '-o', 'data_straightr.nii'], verbose=verbose) # Apply straightening to segmentation # N.B. 
Output is RPI printv('\nApply straightening to segmentation...', verbose) sct_apply_transfo.main(['-i', 'segmentation.nii', '-d', 'data_straightr.nii', '-w', 'warp_curve2straight.nii.gz', '-o', 'segmentation_straight.nii', '-x', 'linear', '-v', '0']) # Threshold segmentation at 0.5 img = Image('segmentation_straight.nii') img.data = threshold(img.data, 0.5) img.save() # If disc label file is provided, label vertebrae using that file instead of automatically if fname_disc: # Apply straightening to disc-label printv('\nApply straightening to disc labels...', verbose) run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' % (fname_disc, 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'labeldisc_straight.nii.gz', 'label'), verbose=verbose ) label_vert('segmentation_straight.nii', 'labeldisc_straight.nii.gz', verbose=1) else: printv('\nCreate label to identify disc...', verbose) fname_labelz = os.path.join(path_tmp, 'labelz.nii.gz') if initcenter is not None: # find z centered in FOV nii = Image('segmentation.nii').change_orientation(""RPI"") nx, ny, nz, nt, px, py, pz, pt = nii.dim z_center = round(nz / 2) initz = [z_center, initcenter] if initz is not None: im_label = create_labels_along_segmentation(Image('segmentation.nii'), [tuple(initz)]) im_label.save(fname_labelz) elif fname_initlabel is not None: Image(fname_initlabel).save(fname_labelz) else: # automatically finds C2-C3 disc im_data = Image('data.nii') im_seg = Image('segmentation.nii') # because verbose is also used for keeping temp files verbose_detect_c2c3 = 0 if remove_temp_files else 2 im_label_c2c3 = detect_c2c3(im_data, im_seg, contrast, verbose=verbose_detect_c2c3) ind_label = np.where(im_label_c2c3.data) if np.size(ind_label) == 0: printv('Automatic C2-C3 detection failed. Please provide manual label with sct_label_utils', 1, 'error') sys.exit(1) im_label_c2c3.data[ind_label] = 3 im_label_c2c3.save(fname_labelz) # dilate label so it is not lost when applying warping dilate(Image(fname_labelz), 3, 'ball').save(fname_labelz) # Apply straightening to z-label printv('\nAnd apply straightening to label...', verbose) sct_apply_transfo.main(['-i', 'labelz.nii.gz', '-d', 'data_straightr.nii', '-w', 'warp_curve2straight.nii.gz', '-o', 'labelz_straight.nii.gz', '-x', 'nn', '-v', '0']) # get z value and disk value to initialize labeling printv('\nGet z and disc values from straight label...', verbose) init_disc = get_z_and_disc_values_from_label('labelz_straight.nii.gz') printv('.. 
' + str(init_disc), verbose) # apply laplacian filtering if arguments.laplacian: printv('\nApply Laplacian filter...', verbose) img = Image(""data_straightr.nii"") # apply std dev to each axis of the image sigmas = [1 for i in range(len(img.data.shape))] # adjust sigma based on voxel size sigmas = [sigmas[i] / img.dim[i + 4] for i in range(3)] # smooth data img.data = laplacian(img.data, sigmas) img.save() # detect vertebral levels on straight spinal cord init_disc[1] = init_disc[1] - 1 vertebral_detection('data_straightr.nii', 'segmentation_straight.nii', contrast, arguments.param, init_disc=init_disc, verbose=verbose, path_template=path_template, path_output=path_output, scale_dist=scale_dist) # un-straighten labeled spinal cord printv('\nUn-straighten labeling...', verbose) sct_apply_transfo.main(['-i', 'segmentation_straight_labeled.nii', '-d', 'segmentation.nii', '-w', 'warp_straight2curve.nii.gz', '-o', 'segmentation_labeled.nii', '-x', 'nn', '-v', '0']) if clean_labels == 1: printv('\nClean labeled segmentation (remove labels outside segmentation)...', verbose) clean_extra_labels('segmentation_labeled.nii', 'segmentation.nii') elif clean_labels == 2: printv('\nClean labeled segmentation (remove labels outside segmentation and fill in missing labels)...', verbose) clean_extra_and_missing_labels('segmentation_labeled.nii', 'segmentation.nii') # label discs printv('\nLabel discs...', verbose) printv('\nUn-straighten labeled discs...', verbose) run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' % ('segmentation_straight_labeled_disc.nii', 'segmentation.nii', 'warp_straight2curve.nii.gz', 'segmentation_labeled_disc.nii', 'label'), verbose=verbose, is_sct_binary=True, ) # come back os.chdir(curdir) # Generate output files path_seg, file_seg, ext_seg = extract_fname(fname_seg) fname_seg_labeled = os.path.join(path_output, file_seg + '_labeled' + ext_seg) printv('\nGenerate output files...', verbose) generate_output_file(os.path.join(path_tmp, ""segmentation_labeled.nii""), fname_seg_labeled) generate_output_file(os.path.join(path_tmp, ""segmentation_labeled_disc.nii""), os.path.join(path_output, file_seg + '_labeled_discs' + ext_seg)) # copy straightening files in case subsequent SCT functions need them generate_output_file(os.path.join(path_tmp, ""warp_curve2straight.nii.gz""), os.path.join(path_output, ""warp_curve2straight.nii.gz""), verbose=verbose) generate_output_file(os.path.join(path_tmp, ""warp_straight2curve.nii.gz""), os.path.join(path_output, ""warp_straight2curve.nii.gz""), verbose=verbose) generate_output_file(os.path.join(path_tmp, ""straight_ref.nii.gz""), os.path.join(path_output, ""straight_ref.nii.gz""), verbose=verbose) # Remove temporary files if remove_temp_files == 1: printv('\nRemove temporary files...', verbose) rmtree(path_tmp) # Generate QC report if arguments.qc is not None: path_qc = os.path.abspath(arguments.qc) qc_dataset = arguments.qc_dataset qc_subject = arguments.qc_subject labeled_seg_file = os.path.join(path_output, file_seg + '_labeled' + ext_seg) generate_qc(fname_in, fname_seg=labeled_seg_file, args=argv, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_label_vertebrae') display_viewer_syntax([fname_in, fname_seg_labeled], colormaps=['', 'subcortical'], opacities=['1', '0.5']) ","def main(argv=None): parser = get_parser() arguments = parser.parse_args(argv) verbose = arguments.v set_loglevel(verbose=verbose) fname_in = os.path.abspath(arguments.i) fname_seg = os.path.abspath(arguments.s) contrast = 
arguments.c path_template = os.path.abspath(arguments.t) scale_dist = arguments.scale_dist path_output = os.path.abspath(arguments.ofolder) fname_disc = arguments.discfile if fname_disc is not None: fname_disc = os.path.abspath(fname_disc) initz = arguments.initz initcenter = arguments.initcenter fname_initlabel = arguments.initlabel if fname_initlabel is not None: fname_initlabel = os.path.abspath(fname_initlabel) remove_temp_files = arguments.r clean_labels = arguments.clean_labels path_tmp = tmp_create(basename=""label_vertebrae"") # Copying input data to tmp folder printv('\nCopying input data to tmp folder...', verbose) Image(fname_in).save(os.path.join(path_tmp, ""data.nii"")) Image(fname_seg).save(os.path.join(path_tmp, ""segmentation.nii"")) # Go go temp folder curdir = os.getcwd() os.chdir(path_tmp) # Straighten spinal cord printv('\nStraighten spinal cord...', verbose) # check if warp_curve2straight and warp_straight2curve already exist (i.e. no need to do it another time) cache_sig = cache_signature( input_files=[fname_in, fname_seg], ) fname_cache = ""straightening.cache"" if (cache_valid(os.path.join(curdir, fname_cache), cache_sig) and os.path.isfile(os.path.join(curdir, ""warp_curve2straight.nii.gz"")) and os.path.isfile(os.path.join(curdir, ""warp_straight2curve.nii.gz"")) and os.path.isfile(os.path.join(curdir, ""straight_ref.nii.gz""))): # if they exist, copy them into current folder printv('Reusing existing warping field which seems to be valid', verbose, 'warning') copy(os.path.join(curdir, ""warp_curve2straight.nii.gz""), 'warp_curve2straight.nii.gz') copy(os.path.join(curdir, ""warp_straight2curve.nii.gz""), 'warp_straight2curve.nii.gz') copy(os.path.join(curdir, ""straight_ref.nii.gz""), 'straight_ref.nii.gz') # apply straightening s, o = run_proc(['sct_apply_transfo', '-i', 'data.nii', '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'data_straight.nii']) else: sct_straighten_spinalcord.main(argv=[ '-i', 'data.nii', '-s', 'segmentation.nii', '-r', str(remove_temp_files), '-v', '0', ]) cache_save(os.path.join(path_output, fname_cache), cache_sig) # resample to 0.5mm isotropic to match template resolution printv('\nResample to 0.5mm isotropic...', verbose) s, o = run_proc(['sct_resample', '-i', 'data_straight.nii', '-mm', '0.5x0.5x0.5', '-x', 'linear', '-o', 'data_straightr.nii'], verbose=verbose) # Apply straightening to segmentation # N.B. 
Output is RPI printv('\nApply straightening to segmentation...', verbose) sct_apply_transfo.main(['-i', 'segmentation.nii', '-d', 'data_straightr.nii', '-w', 'warp_curve2straight.nii.gz', '-o', 'segmentation_straight.nii', '-x', 'linear', '-v', '0']) # Threshold segmentation at 0.5 img = Image('segmentation_straight.nii') img.data = threshold(img.data, 0.5) img.save() # If disc label file is provided, label vertebrae using that file instead of automatically if fname_disc: # Apply straightening to disc-label printv('\nApply straightening to disc labels...', verbose) run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' % (fname_disc, 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'labeldisc_straight.nii.gz', 'label'), verbose=verbose ) label_vert('segmentation_straight.nii', 'labeldisc_straight.nii.gz', verbose=1) else: printv('\nCreate label to identify disc...', verbose) fname_labelz = os.path.join(path_tmp, 'labelz.nii.gz') if initcenter is not None: # find z centered in FOV nii = Image('segmentation.nii').change_orientation(""RPI"") nx, ny, nz, nt, px, py, pz, pt = nii.dim z_center = round(nz / 2) initz = [z_center, initcenter] if initz is not None: im_label = create_labels_along_segmentation(Image('segmentation.nii'), [tuple(initz)]) im_label.save(fname_labelz) elif fname_initlabel is not None: Image(fname_initlabel).save(fname_labelz) else: # automatically finds C2-C3 disc im_data = Image('data.nii') im_seg = Image('segmentation.nii') # because verbose is also used for keeping temp files verbose_detect_c2c3 = 0 if remove_temp_files else 2 im_label_c2c3 = detect_c2c3(im_data, im_seg, contrast, verbose=verbose_detect_c2c3) ind_label = np.where(im_label_c2c3.data) if np.size(ind_label) == 0: printv('Automatic C2-C3 detection failed. Please provide manual label with sct_label_utils', 1, 'error') sys.exit(1) im_label_c2c3.data[ind_label] = 3 im_label_c2c3.save(fname_labelz) # dilate label so it is not lost when applying warping dilate(Image(fname_labelz), 3, 'ball').save(fname_labelz) # Apply straightening to z-label printv('\nAnd apply straightening to label...', verbose) sct_apply_transfo.main(['-i', 'labelz.nii.gz', '-d', 'data_straightr.nii', '-w', 'warp_curve2straight.nii.gz', '-o', 'labelz_straight.nii.gz', '-x', 'nn', '-v', '0']) # get z value and disk value to initialize labeling printv('\nGet z and disc values from straight label...', verbose) init_disc = get_z_and_disc_values_from_label('labelz_straight.nii.gz') printv('.. 
' + str(init_disc), verbose) # apply laplacian filtering if arguments.laplacian: printv('\nApply Laplacian filter...', verbose) img = Image(""data_straightr.nii"") # apply std dev to each axis of the image sigmas = [1 for i in range(len(img.data.shape))] # adjust sigma based on voxel size sigmas = [sigmas[i] / img.dim[i + 4] for i in range(3)] # smooth data img.data = laplacian(img.data, sigmas) img.save() # detect vertebral levels on straight spinal cord init_disc[1] = init_disc[1] - 1 vertebral_detection('data_straightr.nii', 'segmentation_straight.nii', contrast, arguments.param, init_disc=init_disc, verbose=verbose, path_template=path_template, path_output=path_output, scale_dist=scale_dist) # un-straighten labeled spinal cord printv('\nUn-straighten labeling...', verbose) sct_apply_transfo.main(['-i', 'segmentation_straight_labeled.nii', '-d', 'segmentation.nii', '-w', 'warp_straight2curve.nii.gz', '-o', 'segmentation_labeled.nii', '-x', 'nn', '-v', '0']) if clean_labels == 1: printv('\nCleaning labeled segmentation (removing labeled voxels outside segmentation)...', verbose) clean_extra_labels('segmentation_labeled.nii', 'segmentation.nii') elif clean_labels == 2: printv('\nClean labeled segmentation (remove labels outside segmentation and fill in missing labels)...', verbose) clean_extra_and_missing_labels('segmentation_labeled.nii', 'segmentation.nii') # label discs printv('\nLabel discs...', verbose) printv('\nUn-straighten labeled discs...', verbose) run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' % ('segmentation_straight_labeled_disc.nii', 'segmentation.nii', 'warp_straight2curve.nii.gz', 'segmentation_labeled_disc.nii', 'label'), verbose=verbose, is_sct_binary=True, ) # come back os.chdir(curdir) # Generate output files path_seg, file_seg, ext_seg = extract_fname(fname_seg) fname_seg_labeled = os.path.join(path_output, file_seg + '_labeled' + ext_seg) printv('\nGenerate output files...', verbose) generate_output_file(os.path.join(path_tmp, ""segmentation_labeled.nii""), fname_seg_labeled) generate_output_file(os.path.join(path_tmp, ""segmentation_labeled_disc.nii""), os.path.join(path_output, file_seg + '_labeled_discs' + ext_seg)) # copy straightening files in case subsequent SCT functions need them generate_output_file(os.path.join(path_tmp, ""warp_curve2straight.nii.gz""), os.path.join(path_output, ""warp_curve2straight.nii.gz""), verbose=verbose) generate_output_file(os.path.join(path_tmp, ""warp_straight2curve.nii.gz""), os.path.join(path_output, ""warp_straight2curve.nii.gz""), verbose=verbose) generate_output_file(os.path.join(path_tmp, ""straight_ref.nii.gz""), os.path.join(path_output, ""straight_ref.nii.gz""), verbose=verbose) # Remove temporary files if remove_temp_files == 1: printv('\nRemove temporary files...', verbose) rmtree(path_tmp) # Generate QC report if arguments.qc is not None: path_qc = os.path.abspath(arguments.qc) qc_dataset = arguments.qc_dataset qc_subject = arguments.qc_subject labeled_seg_file = os.path.join(path_output, file_seg + '_labeled' + ext_seg) generate_qc(fname_in, fname_seg=labeled_seg_file, args=argv, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_label_vertebrae') display_viewer_syntax([fname_in, fname_seg_labeled], colormaps=['', 'subcortical'], opacities=['1', '0.5']) " 48464,"def deprecation_schema(for_collection): main_fields = { Required('why'): Any(*string_types), Required('alternative'): Any(*string_types), Required('removed_from_collection'): collection_name, 'removed': 
Any(True), } date_schema = { Required('removed_at_date'): date(), } date_schema.update(main_fields) if for_collection: version_schema = {Required('removed_in'): version(for_collection)} else: version_schema = {Required('removed_in'): deprecation_versions()} version_schema.update(main_fields) result = Any( Schema(version_schema, extra=PREVENT_EXTRA), Schema(date_schema, extra=PREVENT_EXTRA), ) if for_collection: result = All( result, partial(check_removal_version, version_field='removed_in', collection_name_field='removed_from_collection', error_code='invalid-removal-version')) return result ","def deprecation_schema(for_collection): main_fields = { Required('why'): Any(*string_types), Required('alternative'): Any(*string_types), Required('removed_from_collection'): collection_name, 'removed': Any(True), } date_schema = { Required('removed_at_date'): date(), } date_schema.update(main_fields) if for_collection: version_schema = {Required('removed_in'): version(for_collection)} else: version_schema = { Required('removed_in'): deprecation_versions(), } version_schema.update(main_fields) result = Any( Schema(version_schema, extra=PREVENT_EXTRA), Schema(date_schema, extra=PREVENT_EXTRA), ) if for_collection: result = All( result, partial(check_removal_version, version_field='removed_in', collection_name_field='removed_from_collection', error_code='invalid-removal-version')) return result " 57808,"def splunk_edit_notable_event_command(proxy): if not proxy: os.environ[""HTTPS_PROXY""] = """" os.environ[""HTTP_PROXY""] = """" os.environ[""https_proxy""] = """" os.environ[""http_proxy""] = """" baseurl = 'https://' + demisto.params()['host'] + ':' + demisto.params()['port'] + '/' username = demisto.params()['authentication']['identifier'] password = demisto.params()['authentication']['password'] if username == '_token': headers = {""Authorization"": ""Bearer {}"".format(password)} auth_req = requests.post( baseurl + 'services/auth/login', data={'output_mode': 'json'}, headers=headers, verify=VERIFY_CERTIFICATE ) else: auth_req = requests.post( baseurl + 'services/auth/login', data={'username': username, 'password': password, 'output_mode': 'json'}, verify=VERIFY_CERTIFICATE ) sessionKey = auth_req.json()['sessionKey'] eventIDs = None if demisto.get(demisto.args(), 'eventIDs'): eventIDsStr = demisto.args()['eventIDs'] eventIDs = eventIDsStr.split("","") status = None if demisto.get(demisto.args(), 'status'): status = int(demisto.args()['status']) response_info = updateNotableEvents(sessionKey=sessionKey, baseurl=baseurl, comment=demisto.get(demisto.args(), 'comment'), status=status, urgency=demisto.get(demisto.args(), 'urgency'), owner=demisto.get(demisto.args(), 'owner'), eventIDs=eventIDs) if 'success' not in response_info or not response_info['success']: demisto.results({'ContentsFormat': formats['text'], 'Type': entryTypes['error'], 'Contents': ""Could not update notable "" ""events: "" + demisto.args()['eventIDs'] + ' : ' + str(response_info)}) demisto.results('Splunk ES Notable events: ' + response_info.get('message')) ","def splunk_edit_notable_event_command(proxy): if not proxy: os.environ[""HTTPS_PROXY""] = """" os.environ[""HTTP_PROXY""] = """" os.environ[""https_proxy""] = """" os.environ[""http_proxy""] = """" baseurl = 'https://' + demisto.params()['host'] + ':' + demisto.params()['port'] + '/' username = demisto.params()['authentication']['identifier'] password = demisto.params()['authentication']['password'] data={'output_mode': 'json'} headers = {} if username == '_token': headers = 
{""Authorization"": ""Bearer {}"".format(password)} else: data={'username': username, 'password': password, 'output_mode': 'json'} auth_req = requests.post( baseurl + 'services/auth/login', data=data, headers=headers, verify=VERIFY_CERTIFICATE ) sessionKey = auth_req.json()['sessionKey'] eventIDs = None if demisto.get(demisto.args(), 'eventIDs'): eventIDsStr = demisto.args()['eventIDs'] eventIDs = eventIDsStr.split("","") status = None if demisto.get(demisto.args(), 'status'): status = int(demisto.args()['status']) response_info = updateNotableEvents(sessionKey=sessionKey, baseurl=baseurl, comment=demisto.get(demisto.args(), 'comment'), status=status, urgency=demisto.get(demisto.args(), 'urgency'), owner=demisto.get(demisto.args(), 'owner'), eventIDs=eventIDs) if 'success' not in response_info or not response_info['success']: demisto.results({'ContentsFormat': formats['text'], 'Type': entryTypes['error'], 'Contents': ""Could not update notable "" ""events: "" + demisto.args()['eventIDs'] + ' : ' + str(response_info)}) demisto.results('Splunk ES Notable events: ' + response_info.get('message')) " 15454,"def validate_config(api, data) -> bool: """"""Test if auth and email are OK."""""" try: try: # maybe check here it is a bot token as personal access tokens expire after 12 hours. _LOGGER.debug(""Authenticating with Webex"") person_me = api.people.me() _LOGGER.debug(""Authenticated OK."") _LOGGER.debug(""api.people.me: %s"", person_me) if person_me.type != ""bot"": _LOGGER.error( ""Although auth passed, an invalid token type is being used: %s"", person_me.type, ) raise InvalidAuthTokenType email = data[CONF_EMAIL] _LOGGER.debug(""Searching Webex for people with email: '%s'"", email) person = next(iter(api.people.list(email=email)), None) if person is not None: _LOGGER.debug( ""Found person with email: '%s' success. person: %s"", email, person, ) data[DATA_DISPLAY_NAME] = person.displayName return True _LOGGER.error(""Cannot find any Webex user with email: %s"", email) raise EmailNotFound except webexteamssdk.ApiError as error: _LOGGER.error(error) if error.status_code == 400: raise EmailNotFound from error if error.status_code == 401: raise InvalidAuth from error raise error except requests.exceptions.ConnectionError as connection_error: _LOGGER.error(connection_error) raise CannotConnect from connection_error ","def validate_config(api, data) -> bool: """"""Test if auth and email are OK."""""" try: try: # maybe check here it is a bot token as personal access tokens expire after 12 hours. _LOGGER.debug(""Authenticating with Webex"") person_me = api.people.me() _LOGGER.debug(""Authenticated OK"") _LOGGER.debug(""api.people.me: %s"", person_me) if person_me.type != ""bot"": _LOGGER.error( ""Although auth passed, an invalid token type is being used: %s"", person_me.type, ) raise InvalidAuthTokenType email = data[CONF_EMAIL] _LOGGER.debug(""Searching Webex for people with email: '%s'"", email) person = next(iter(api.people.list(email=email)), None) if person is not None: _LOGGER.debug( ""Found person with email: '%s' success. 
person: %s"", email, person, ) data[DATA_DISPLAY_NAME] = person.displayName return True _LOGGER.error(""Cannot find any Webex user with email: %s"", email) raise EmailNotFound except webexteamssdk.ApiError as error: _LOGGER.error(error) if error.status_code == 400: raise EmailNotFound from error if error.status_code == 401: raise InvalidAuth from error raise error except requests.exceptions.ConnectionError as connection_error: _LOGGER.error(connection_error) raise CannotConnect from connection_error " 5458,"def X(host, nameserver=None): """""" .. versionadded:: 9999.9 Return the PTR record for ``host``. Always returns a list. CLI Example: .. code-block:: bash salt ns1 dig.X 1.2.3.4 """""" dig = [""dig"", ""+short"", ""-x"", str(host)] if nameserver is not None: dig.append(""@{}"".format(nameserver)) cmd = __salt__[""cmd.run_all""](dig, python_shell=False) # In this case, 0 is not the same as False if cmd[""retcode""] != 0: log.warning( ""dig returned exit code '%s'. Returning empty list as fallback."", cmd[""retcode""], ) return [] return [i for i in cmd[""stdout""].split(""\n"")] ","def X(host, nameserver=None): """""" .. versionadded:: 3006.0 Return the PTR record for ``host``. Always returns a list. CLI Example: .. code-block:: bash salt ns1 dig.X 1.2.3.4 """""" dig = [""dig"", ""+short"", ""-x"", str(host)] if nameserver is not None: dig.append(""@{}"".format(nameserver)) cmd = __salt__[""cmd.run_all""](dig, python_shell=False) # In this case, 0 is not the same as False if cmd[""retcode""] != 0: log.warning( ""dig returned exit code '%s'. Returning empty list as fallback."", cmd[""retcode""], ) return [] return [i for i in cmd[""stdout""].split(""\n"")] " 17343,"def zeros_like(other, dtype: DTypeLike = None): """"""Return a new object of zeros with the same shape and type as a given dataarray or dataset. Parameters ---------- other : DataArray, Dataset, or Variable The reference object in input dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. Returns ------- out : same as object New object of zeros with the same shape and type as other. Examples -------- >>> import numpy as np >>> import xarray as xr >>> x = xr.DataArray(np.arange(6).reshape(2, 3), ... dims=['lat', 'lon'], ... coords={'lat': [1, 2], 'lon': [0, 1, 2]}) >>> x array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 1 2 * lon (lon) int64 0 1 2 >>> xr.zeros_like(x) array([[0, 0, 0], [0, 0, 0]]) Coordinates: * lat (lat) int64 1 2 * lon (lon) int64 0 1 2 >>> xr.zeros_like(x, dtype=np.float) array([[0., 0., 0.], [0., 0., 0.]]) Coordinates: * lat (lat) int64 1 2 * lon (lon) int64 0 1 2 See also -------- ones_like full_like """""" return full_like(other, 0, dtype) ","def zeros_like(other, dtype: DTypeLike = None): """"""Return a new object of zeros with the same shape and type as a given dataarray or dataset. Parameters ---------- other : DataArray, Dataset, or Variable The reference object. The output will have the same dimensions and coordinates as this object. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. Returns ------- out : same as object New object of zeros with the same shape and type as other. Examples -------- >>> import numpy as np >>> import xarray as xr >>> x = xr.DataArray(np.arange(6).reshape(2, 3), ... dims=['lat', 'lon'], ... 
coords={'lat': [1, 2], 'lon': [0, 1, 2]}) >>> x array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 1 2 * lon (lon) int64 0 1 2 >>> xr.zeros_like(x) array([[0, 0, 0], [0, 0, 0]]) Coordinates: * lat (lat) int64 1 2 * lon (lon) int64 0 1 2 >>> xr.zeros_like(x, dtype=np.float) array([[0., 0., 0.], [0., 0., 0.]]) Coordinates: * lat (lat) int64 1 2 * lon (lon) int64 0 1 2 See also -------- ones_like full_like """""" return full_like(other, 0, dtype) " 5085,"def pick_fn(event): adjust_colorbar(event.mouseevent) ","def on_pick(event): adjust_colorbar(event.mouseevent) " 44212,"def unitary_cost(n, rank_r, rank_m, br=7, aleph=10, beth=20): r""""""Return the number of Toffoli gates needed to implement the qubitization unitary operator. The expression for computing the cost is taken from [`arXiv:2011.03494 `_]. Args: n (int): number of molecular orbitals rank_r (int): the rank of the first factorization step rank_m (int): the average rank of the second factorization step br (int): number of bits for ancilla qubit rotation aleph (int): number of bits for the keep register beth (int): number of bits for the rotation angles Returns: int: the number of Toffoli gates to implement the qubitization unitary **Example** >>> n = 14 >>> rank_r = 26 >>> rank_m = 5.5 >>> br = 7 >>> aleph = 10 >>> beth = 20 >>> unitary_cost(n, norm, error, rank_r, rank_m, br, aleph, beth) 2007 """""" eta = np.array([np.log2(n) for n in range(1, rank_r + 1) if rank_r % n == 0]) eta = int(np.max([n for n in eta if n % 1 == 0])) nxi = np.ceil(np.log2(rank_m)) nlxi = np.ceil(np.log2(rank_r * rank_m + n / 2)) nl = np.ceil(np.log2(rank_r + 1)) bp1 = nl + aleph bp2 = nxi + aleph + 2 bo = nxi + nlxi + br + 1 rank_rm = rank_r * rank_m cost = 9 * nl - 6 * eta + 12 * br + 34 * nxi + 8 * nlxi + 9 * aleph + 3 * n * beth - 6 * n - 43 cost += qrom_cost((rank_r, 1, 0, bp1, -1))[0] cost += qrom_cost((rank_r, 1, 0, bo, -1))[0] cost += qrom_cost((rank_r, 1, 0, 1, 0))[0] * 2 cost += qrom_cost((rank_rm, n / 2, rank_rm, n * beth, 0))[0] cost += qrom_cost((rank_rm, n / 2, rank_rm, 2, 0))[0] * 2 cost += qrom_cost((rank_rm, n / 2, rank_rm, 2 * bp2, -1))[0] return int(cost) ","def unitary_cost(n, rank_r, rank_m, br=7, aleph=10, beth=20): r""""""Return the number of Toffoli gates needed to implement the qubitization unitary operator. The expression for computing the cost is taken from [`arXiv:2011.03494 `_]. 
Args: n (int): number of molecular orbitals rank_r (int): rank of the first factorization step rank_m (int): the average rank of the second factorization step br (int): number of bits for ancilla qubit rotation aleph (int): number of bits for the keep register beth (int): number of bits for the rotation angles Returns: int: the number of Toffoli gates to implement the qubitization unitary **Example** >>> n = 14 >>> rank_r = 26 >>> rank_m = 5.5 >>> br = 7 >>> aleph = 10 >>> beth = 20 >>> unitary_cost(n, norm, error, rank_r, rank_m, br, aleph, beth) 2007 """""" eta = np.array([np.log2(n) for n in range(1, rank_r + 1) if rank_r % n == 0]) eta = int(np.max([n for n in eta if n % 1 == 0])) nxi = np.ceil(np.log2(rank_m)) nlxi = np.ceil(np.log2(rank_r * rank_m + n / 2)) nl = np.ceil(np.log2(rank_r + 1)) bp1 = nl + aleph bp2 = nxi + aleph + 2 bo = nxi + nlxi + br + 1 rank_rm = rank_r * rank_m cost = 9 * nl - 6 * eta + 12 * br + 34 * nxi + 8 * nlxi + 9 * aleph + 3 * n * beth - 6 * n - 43 cost += qrom_cost((rank_r, 1, 0, bp1, -1))[0] cost += qrom_cost((rank_r, 1, 0, bo, -1))[0] cost += qrom_cost((rank_r, 1, 0, 1, 0))[0] * 2 cost += qrom_cost((rank_rm, n / 2, rank_rm, n * beth, 0))[0] cost += qrom_cost((rank_rm, n / 2, rank_rm, 2, 0))[0] * 2 cost += qrom_cost((rank_rm, n / 2, rank_rm, 2 * bp2, -1))[0] return int(cost) " 4201,"def _load_absorption(freqs): """"""Load molar extinction coefficients."""""" # Data from https://omlc.org/spectra/hemoglobin/summary.html # The text was copied to a text file. The text before and # after the table was deleted. The the following was run in # matlab # extinct_coef=importdata('extinction_coef.txt') # save('extinction_coef.mat', 'extinct_coef') # # Returns data as [[HbO2(freq1), HbO2(freq1), # [Hb(freq2)], Hb(freq2)]] from scipy.io import loadmat from scipy.interpolate import interp1d extinction_fname = op.join(op.dirname(mne.__file__), 'data', 'extinction_coef.mat') a = loadmat(extinction_fname)['extinct_coef'] interp_hbo = interp1d(a[:, 0], a[:, 1], kind='linear') interp_hb = interp1d(a[:, 0], a[:, 2], kind='linear') ext_coef = np.matrix([[interp_hbo(freqs[0]), interp_hb(freqs[0])], [interp_hbo(freqs[1]), interp_hb(freqs[1])]]) abs_coef = ext_coef * 0.2303 return abs_coef ","def _load_absorption(freqs): """"""Load molar extinction coefficients."""""" # Data from https://omlc.org/spectra/hemoglobin/summary.html # The text was copied to a text file. The text before and # after the table was deleted. The the following was run in # matlab # extinct_coef=importdata('extinction_coef.txt') # save('extinction_coef.mat', 'extinct_coef') # # Returns data as [[HbO2(freq1), HbO2(freq1), # [Hb(freq2)], Hb(freq2)]] from scipy.io import loadmat from scipy.interpolate import interp1d extinction_fname = op.join(op.dirname(__file__), '..', 'data', 'extinction_coef.mat') a = loadmat(extinction_fname)['extinct_coef'] interp_hbo = interp1d(a[:, 0], a[:, 1], kind='linear') interp_hb = interp1d(a[:, 0], a[:, 2], kind='linear') ext_coef = np.matrix([[interp_hbo(freqs[0]), interp_hb(freqs[0])], [interp_hbo(freqs[1]), interp_hb(freqs[1])]]) abs_coef = ext_coef * 0.2303 return abs_coef " 274,"def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. 
Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See `discard_tuned_samples`. step : function or iterable of functions A step function or collection of functions. If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, `start` should be a list of dicts of length = `chains`. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. 
Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. progressbar : bool Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and `effective_n`. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. 
Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace ","def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. 
The number of tuned samples are discarded by default. See `discard_tuned_samples`. step : function or iterable of functions A step function or collection of functions. If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as ``adapt_diag``, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, `start` should be a list of dicts of length = `chains`. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. 
Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. progressbar : bool Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and `effective_n`. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. 
Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace " 18432,"def module(*args): module_cmd = eval(_cmd_template) # So we can monkeypatch for testing if args[0] in module_change_commands: # Do the module manipulation, then output the environment in JSON # and read the JSON back in the parent process to update os.environ # For python, we use the same python running the Spack process, because # we can guarantee its existence. We have to do some LD_LIBRARY_PATH # shenanigans to ensure python will run. 
# LD_LIBRARY_PATH under which Spack ran os.environ['SPACK_LD_LIBRARY_PATH'] = spack.main.spack_ld_library_path # suppress output from module function module_cmd += ' >/dev/null;' # Capture the new LD_LIBRARY_PATH after `module` was run module_cmd += 'export SPACK_NEW_LD_LIBRARY_PATH=""$LD_LIBRARY_PATH"";' # Set LD_LIBRARY_PATH to value at Spack startup time to ensure that # python executable finds its libraries module_cmd += 'LD_LIBRARY_PATH=""$SPACK_LD_LIBRARY_PATH"" ' # Execute the python command module_cmd += '%s -c ""%s"";' % (sys.executable, py_cmd) # If LD_LIBRARY_PATH was set after `module`, dump the old value because # we have since corrupted it to ensure python would run. # dump SPACKIGNORE as a placeholder for parsing if LD_LIBRARY_PATH null module_cmd += 'echo ""${SPACK_NEW_LD_LIBRARY_PATH:-SPACKIGNORE}""' module_p = subprocess.Popen(module_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, executable=""/bin/bash"") # Cray modules spit out warnings that we cannot supress. # This hack skips to the last output (the environment) env_out = str(module_p.communicate()[0].decode()).strip().split('\n') # The environment dumped as json env_json = env_out[-2] # Either the uncorrupted $LD_LIBRARY_PATH or SPACKIGNORE new_ld_library_path = env_out[-1] # Update os.environ with new dict env_dict = json.loads(env_json) os.environ.clear() os.environ.update(env_dict) # Override restored LD_LIBRARY_PATH with pre-python value if new_ld_library_path == 'SPACKIGNORE': os.environ.pop('LD_LIBRARY_PATH', None) else: os.environ['LD_LIBRARY_PATH'] = new_ld_library_path else: # Simply execute commands that don't change state and return output module_p = subprocess.Popen(module_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, executable=""/bin/bash"") # Decode and str to return a string object in both python 2 and 3 return str(module_p.communicate()[0].decode()) ","def module(*args): module_cmd = eval(_cmd_template) # So we can monkeypatch for testing if args[0] in module_change_commands: # Do the module manipulation, then output the environment in JSON # and read the JSON back in the parent process to update os.environ # For python, we use the same python running the Spack process, because # we can guarantee its existence. We have to do some LD_LIBRARY_PATH # shenanigans to ensure python will run. # LD_LIBRARY_PATH under which Spack ran os.environ['SPACK_LD_LIBRARY_PATH'] = spack.main.spack_ld_library_path # suppress output from module function module_cmd += ' >/dev/null;' # Capture the new LD_LIBRARY_PATH after `module` was run module_cmd += 'export SPACK_NEW_LD_LIBRARY_PATH=""$LD_LIBRARY_PATH"";' # Set LD_LIBRARY_PATH to value at Spack startup time to ensure that # python executable finds its libraries module_cmd += 'LD_LIBRARY_PATH=""$SPACK_LD_LIBRARY_PATH"" ' # Execute the python command module_cmd += '%s -c ""%s"";' % (sys.executable, py_cmd) # If LD_LIBRARY_PATH was set after `module`, dump the old value because # we have since corrupted it to ensure python would run. # dump SPACKIGNORE as a placeholder for parsing if LD_LIBRARY_PATH null module_cmd += 'echo ""${SPACK_NEW_LD_LIBRARY_PATH:-SPACKIGNORE}""' module_p = subprocess.Popen(module_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, executable=""/bin/bash"") # Cray modules spit out warnings that we cannot suppress. 
# This hack skips to the last output (the environment) env_out = str(module_p.communicate()[0].decode()).strip().split('\n') # The environment dumped as json env_json = env_out[-2] # Either the uncorrupted $LD_LIBRARY_PATH or SPACKIGNORE new_ld_library_path = env_out[-1] # Update os.environ with new dict env_dict = json.loads(env_json) os.environ.clear() os.environ.update(env_dict) # Override restored LD_LIBRARY_PATH with pre-python value if new_ld_library_path == 'SPACKIGNORE': os.environ.pop('LD_LIBRARY_PATH', None) else: os.environ['LD_LIBRARY_PATH'] = new_ld_library_path else: # Simply execute commands that don't change state and return output module_p = subprocess.Popen(module_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, executable=""/bin/bash"") # Decode and str to return a string object in both python 2 and 3 return str(module_p.communicate()[0].decode()) " 31412,"def url_command(client: Client, url_list: list, reliability: DBotScoreReliability) -> List[CommandResults]: command_results: List[CommandResults] = [] for url in url_list: markdown = ""### PhishTankV2 Database - URL Query \n"" url_data, url = get_url_data(client, url) url_data_is_valid = url_data and ""verified"" in url_data.keys() if url_data_is_valid: dbot = url_data_to_dbot_score(url_data, url, reliability=reliability) markdown += create_verified_markdown(url_data, url) else: markdown += f'#### No matches for URL {url} \n' dbot = Common.DBotScore(url, DBotScoreType.URL, ""PhishTankV2"", 0) command_results.append(CommandResults( indicator=Common.URL(url, dbot), readable_output=markdown, )) return command_results ","def url_command(client: Client, url_list: list, reliability: DBotScoreReliability) -> List[CommandResults]: command_results: List[CommandResults] = [] for url in url_list: markdown = ""### PhishTankV2 Database - URL Query \n"" url_data, url = get_url_data(client, url) url_data_is_valid = url_data and ""verified"" in url_data.keys() if url_data_is_valid: dbot = url_data_to_dbot_score(url_data, url, reliability) markdown += create_verified_markdown(url_data, url) else: markdown += f'#### No matches for URL {url} \n' dbot = Common.DBotScore(url, DBotScoreType.URL, ""PhishTankV2"", 0) command_results.append(CommandResults( indicator=Common.URL(url, dbot), readable_output=markdown, )) return command_results " 23572,"def notify(notifier_id=None, notify_action=None, stream_data=None, timeline_data=None, parameters=None, **kwargs): logger.info(""Tautulli NotificationHandler :: Preparing notification for notifier_id %s."" % notifier_id) notifier_config = notifiers.get_notifier_config(notifier_id=notifier_id) if not notifier_config: return if notify_action in ('test', 'api'): subject = kwargs.pop('subject', 'Tautulli') body = kwargs.pop('body', 'Test Notification') script_args = helpers.split_args(kwargs.pop('script_args', [])) else: # Get the subject and body strings subject_string = notifier_config['notify_text'][notify_action]['subject'] body_string = notifier_config['notify_text'][notify_action]['body'] # Format the subject and body strings subject, body, script_args = build_notify_text(subject=subject_string, body=body_string, notify_action=notify_action, parameters=parameters, agent_id=notifier_config['agent_id'], as_json=notifier_config['config']['as_json']) # Set the notification state in the db notification_id = set_notify_state(session=stream_data or timeline_data, notifier=notifier_config, notify_action=notify_action, subject=subject, body=body, script_args=script_args, 
parameters=parameters) # Send the notification success = notifiers.send_notification(notifier_id=notifier_config['id'], subject=subject, body=body, script_args=script_args, notify_action=notify_action, notification_id=notification_id, parameters=parameters or {}, **kwargs) if success: set_notify_success(notification_id) return True ","def notify(notifier_id=None, notify_action=None, stream_data=None, timeline_data=None, parameters=None, **kwargs): logger.info(""Tautulli NotificationHandler :: Preparing notification for notifier_id %s."" % notifier_id) notifier_config = notifiers.get_notifier_config(notifier_id=notifier_id) if not notifier_config: return if notify_action in ('test', 'api'): subject = kwargs.pop('subject', 'Tautulli') body = kwargs.pop('body', 'Test Notification') script_args = helpers.split_args(kwargs.pop('script_args', [])) else: # Get the subject and body strings subject_string = notifier_config['notify_text'][notify_action]['subject'] body_string = notifier_config['notify_text'][notify_action]['body'] # Format the subject and body strings subject, body, script_args = build_notify_text(subject=subject_string, body=body_string, notify_action=notify_action, parameters=parameters, agent_id=notifier_config['agent_id'], as_json=notifier_config['config'].get('as_json', False)) # Set the notification state in the db notification_id = set_notify_state(session=stream_data or timeline_data, notifier=notifier_config, notify_action=notify_action, subject=subject, body=body, script_args=script_args, parameters=parameters) # Send the notification success = notifiers.send_notification(notifier_id=notifier_config['id'], subject=subject, body=body, script_args=script_args, notify_action=notify_action, notification_id=notification_id, parameters=parameters or {}, **kwargs) if success: set_notify_success(notification_id) return True " 36462,"def _check_arg_types(funcname, *args): hasstr = hasbytes = False for s in args: if isinstance(s, str): hasstr = True elif isinstance(s, bytes): hasbytes = True else: raise TypeError('%s() argument must be str, bytes or os.PathLike object, not %r' % (funcname, s.__class__.__name__)) from None if hasstr and hasbytes: raise TypeError(""Can't mix strings and bytes in path components"") from None ","def _check_arg_types(funcname, *args): hasstr = hasbytes = False for s in args: if isinstance(s, str): hasstr = True elif isinstance(s, bytes): hasbytes = True else: raise TypeError('%s() argument must be str, bytes, or os.PathLike object, not %r' % (funcname, s.__class__.__name__)) from None if hasstr and hasbytes: raise TypeError(""Can't mix strings and bytes in path components"") from None " 53748,"def read_file(filename): with open(str(Path(""."", filename).absolute())) as f: return f.read().strip() ","def read_file(filename): readme_file = Path(""."", filename).absolute() with open(readme_file, encoding=""utf-8"") as fd: return fd.read().strip() " 27707,"def get_dirs_from_args(args: List[str]) -> List[py.path.local]: def is_option(x: str) -> bool: return x.startswith(""-"") def get_file_part_from_node_id(x: str) -> str: return x.split(""::"")[0] def get_dir_from_path(path: py.path.local) -> py.path.local: if path.isdir(): return path return py.path.local(path.dirname) # These look like paths but may not exist possible_paths = ( py.path.local(get_file_part_from_node_id(arg)) for arg in args if not is_option(arg) ) return [get_dir_from_path(path) for path in possible_paths if path.exists()] ","def get_dirs_from_args(args: Iterable[str]) -> 
List[py.path.local]: def is_option(x: str) -> bool: return x.startswith(""-"") def get_file_part_from_node_id(x: str) -> str: return x.split(""::"")[0] def get_dir_from_path(path: py.path.local) -> py.path.local: if path.isdir(): return path return py.path.local(path.dirname) # These look like paths but may not exist possible_paths = ( py.path.local(get_file_part_from_node_id(arg)) for arg in args if not is_option(arg) ) return [get_dir_from_path(path) for path in possible_paths if path.exists()] " 5476,"def change_locale_bn_bd_to_bn_and_remove_bn_inforwards(apps, schema_editor): Document = apps.get_model('wiki', 'Document') DocumentDeletionLog = apps.get_model('wiki', 'DocumentDeletionLog') Document.objects.all().filter(locale='bn-BD').update(locale='bn') DocumentDeletionLog.objects.all().filter(locale='bn-BD').update(locale='bn') # Remove bn-IN Document.objects.all().filter(locale='bn-IN').delete() DocumentDeletionLog.objects.all().filter(locale='bn-IN').delete() ","def change_locale_bn_bd_to_bn_and_remove_bn_in_forwards(apps, schema_editor): Document = apps.get_model('wiki', 'Document') DocumentDeletionLog = apps.get_model('wiki', 'DocumentDeletionLog') Document.objects.all().filter(locale='bn-BD').update(locale='bn') DocumentDeletionLog.objects.all().filter(locale='bn-BD').update(locale='bn') # Remove bn-IN Document.objects.all().filter(locale='bn-IN').delete() DocumentDeletionLog.objects.all().filter(locale='bn-IN').delete() " 6566,"def _get_item_tax_template(args, taxes, out=None, for_validate=False): if out is None: out = {} taxes_with_validity = [] taxes_with_no_validity = [] for tax in taxes: tax_company = frappe.get_value(""Item Tax Template"", tax.item_tax_template, 'company') if (tax.valid_from or tax.maximum_net_rate) and tax_company == args['company']: # In purchase Invoice first preference will be given to supplier invoice date # if supplier date is not present then posting date validation_date = args.get('transaction_date') or args.get('bill_date') or args.get('posting_date') if getdate(tax.valid_from) <= getdate(validation_date) \ and is_within_valid_range(args, tax): taxes_with_validity.append(tax) else: if tax_company == args['company']: taxes_with_no_validity.append(tax) if taxes_with_validity: taxes = sorted(taxes_with_validity, key = lambda i: i.valid_from, reverse=True) else: taxes = taxes_with_no_validity if for_validate: return [tax.item_tax_template for tax in taxes if (cstr(tax.tax_category) == cstr(args.get('tax_category')) \ and (tax.item_tax_template not in taxes))] # all templates have validity and no template is valid if not taxes_with_validity and (not taxes_with_no_validity): return None # do not change if already a valid template if args.get('item_tax_template') in [t.item_tax_template for t in taxes]: out[""item_tax_template""] = args.get('item_tax_template') return args.get('item_tax_template') for tax in taxes: if cstr(tax.tax_category) == cstr(args.get(""tax_category"")): out[""item_tax_template""] = tax.item_tax_template return tax.item_tax_template return None ","def _get_item_tax_template(args, taxes, out=None, for_validate=False): if out is None: out = {} taxes_with_validity = [] taxes_with_no_validity = [] for tax in taxes: tax_company = frappe.get_value(""Item Tax Template"", tax.item_tax_template, 'company') if (tax.valid_from or tax.maximum_net_rate) and tax_company == args['company']: # In purchase Invoice first preference will be given to supplier invoice date # if supplier date is not present then posting date validation_date = 
args.get('transaction_date') or args.get('bill_date') or args.get('posting_date') if getdate(tax.valid_from) <= getdate(validation_date) \ and is_within_valid_range(args, tax): taxes_with_validity.append(tax) else: if tax_company == args['company']: taxes_with_no_validity.append(tax) if taxes_with_validity: taxes = sorted(taxes_with_validity, key = lambda i: i.valid_from, reverse=True) else: taxes = taxes_with_no_validity if for_validate: return [tax.item_tax_template for tax in taxes if (cstr(tax.tax_category) == cstr(args.get('tax_category')) \ and (tax.item_tax_template not in taxes))] # all templates have validity and no template is valid if not taxes_with_validity and (not taxes_with_no_validity): return None # do not change if already a valid template if args.get('item_tax_template') in {t.item_tax_template for t in taxes}: out[""item_tax_template""] = args.get('item_tax_template') return args.get('item_tax_template') for tax in taxes: if cstr(tax.tax_category) == cstr(args.get(""tax_category"")): out[""item_tax_template""] = tax.item_tax_template return tax.item_tax_template return None " 10855,"def create_password(journal_name: str, prompt: str = ""Enter password for new journal: "") -> str: while True: pw = gp.getpass(prompt) if not pw: print(""Password can't be an empty string!"", file=sys.stderr) continue elif pw == gp.getpass(""Enter password again: ""): break gp.getpass(""Passwords did not match, please try again"") if yesno(""Do you want to store the password in your keychain?"", default=True): set_keychain(journal_name, pw) else: set_keychain(journal_name, None) return pw ","def create_password(journal_name: str, prompt: str = ""Enter password for new journal: "") -> str: while True: pw = gp.getpass(prompt) if not pw: print(""Password can't be an empty string!"", file=sys.stderr) continue elif pw == gp.getpass(""Enter password again: ""): break print(""Passwords did not match, please try again"", file=sys.stderr) if yesno(""Do you want to store the password in your keychain?"", default=True): set_keychain(journal_name, pw) else: set_keychain(journal_name, None) return pw " 276,"def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See `discard_tuned_samples`. step : function or iterable of functions A step function or collection of functions. If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the starting point in each chain. 
* advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, `start` should be a list of dicts of length = `chains`. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. progressbar : bool Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and `effective_n`. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. 
Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. 
Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace ","def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See `discard_tuned_samples`. step : function or iterable of functions A step function or collection of functions. 
If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, ``start`` should be a list of dicts of length = `chains`. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. progressbar : bool Whether or not to display a progress bar in the command line. 
The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and `effective_n`. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. 
Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace " 36264,"def combat( adata: AnnData, key: str = 'batch', covariates: Optional[Collection[str]] = None, inplace: bool = True, adata2: AnnData = None, key2: str = 'batch', ) -> Union[AnnData, np.ndarray, None]: """"""\ ComBat function for batch effect correction [Johnson07]_ [Leek12]_ [Pedersen12]_. Corrects for batch effects by fitting linear models, gains statistical power via an EB framework where information is borrowed across genes. This uses the implementation `combat.py`_ [Pedersen12]_ and the adjustment for control cells to account for different composition [Boettcher20]_ . .. 
_combat.py: https://github.com/brentp/combat.py Parameters ---------- adata Annotated data matrix key Key to a categorical annotation from :attr:`~anndata.AnnData.obs` that will be used for batch effect removal. covariates Additional covariates besides the batch variable such as adjustment variables or biological condition. This parameter refers to the design matrix `X` in Equation 2.1 in [Johnson07]_ and to the `mod` argument in the original combat function in the sva R package. Note that not including covariates may introduce bias or lead to the removal of biological signal in unbalanced designs. inplace Whether to replace adata.X or to return the corrected data adata2 Annotated data matrix where the same correction will be applied as for adata. This is particularly useful to account for compositional changes in a dataset or to compute correction factors on a reference subset and apply them to all data. key2 Key to a categorical annotation from :attr:`~anndata.AnnData.obs` that will be used for batch effect removal. Has to contain at most as many categories as the key parameter. Returns ------- Depending on the value of `inplace`, either returns the corrected matrix or or modifies `adata.X`. """""" # check the input if key not in adata.obs_keys(): raise ValueError('Could not find the key {!r} in adata.obs'.format(key)) if covariates is not None: cov_exist = np.isin(covariates, adata.obs_keys()) if np.any(~cov_exist): missing_cov = np.array(covariates)[~cov_exist].tolist() raise ValueError( 'Could not find the covariate(s) {!r} in adata.obs'.format(missing_cov) ) if key in covariates: raise ValueError('Batch key and covariates cannot overlap.') if len(covariates) != len(set(covariates)): raise ValueError('Covariates must be unique.') # only works on dense matrices so far if issparse(adata.X): X = adata.X.A.T else: X = adata.X.T data = pd.DataFrame(data=X, index=adata.var_names, columns=adata.obs_names,) sc.utils.sanitize_anndata(adata) #check if adata2 is present and meaningful format if adata2 is not None: if key2 not in adata2.obs_keys(): raise ValueError('Could not find the key {!r} in adata2.obs'.format(key)) if covariates is not None: if key2 in covariates: raise ValueError('Batch key and covariates cannot overlap.') # only works on dense matrices so far if issparse(adata2.X): X = adata2.X.A.T else: X2 = adata2.X.T data2 = pd.DataFrame(data=X2, index=adata2.var_names, columns=adata2.obs_names,) sc.utils.sanitize_anndata(adata2) # construct a pandas series of the batch annotation model = adata.obs[[key] + (covariates if covariates else [])] batch_info = model.groupby(key).indices.values() n_batch = len(batch_info) n_batches = np.array([len(v) for v in batch_info]) n_array = float(sum(n_batches)) # standardize across genes using a pooled variance estimator logg.info(""Standardizing Data across genes.\n"") s_data, design, var_pooled, stand_mean, grand_mean = _standardize_data(model, data, key) # fitting the parameters on the standardized data logg.info(""Fitting L/S model and finding priors\n"") batch_design = design[design.columns[:n_batch]] # first estimate of the additive batch effect gamma_hat = ( la.inv(batch_design.T @ batch_design) @ batch_design.T @ s_data.T ).values delta_hat = [] # first estimate for the multiplicative batch effect for i, batch_idxs in enumerate(batch_info): delta_hat.append(s_data.iloc[:, batch_idxs].var(axis=1)) # empirically fix the prior hyperparameters gamma_bar = gamma_hat.mean(axis=1) t2 = gamma_hat.var(axis=1) # a_prior and b_prior are the priors on 
lambda and theta from Johnson and Li (2006) a_prior = list(map(_aprior, delta_hat)) b_prior = list(map(_bprior, delta_hat)) logg.info(""Finding parametric adjustments\n"") # gamma star and delta star will be our empirical bayes (EB) estimators # for the additive and multiplicative batch effect per batch and cell gamma_star, delta_star = [], [] for i, batch_idxs in enumerate(batch_info): # temp stores our estimates for the batch effect parameters. # temp[0] is the additive batch effect # temp[1] is the multiplicative batch effect gamma, delta = _it_sol( s_data.iloc[:, batch_idxs].values, gamma_hat[i], delta_hat[i].values, gamma_bar[i], t2[i], a_prior[i], b_prior[i], ) gamma_star.append(gamma) delta_star.append(delta) logg.info(""Adjusting data\n"") bayesdata = s_data gamma_star = np.array(gamma_star) delta_star = np.array(delta_star) # we now apply the parametric adjustment to the standardized data from above # loop over all batches in the data for j, batch_idxs in enumerate(batch_info): # we basically substract the additive batch effect, rescale by the ratio # of multiplicative batch effect to pooled variance and add the overall gene # wise mean dsq = np.sqrt(delta_star[j, :]) dsq = dsq.reshape((len(dsq), 1)) denom = np.dot(dsq, np.ones((1, n_batches[j]))) numer = np.array( bayesdata.iloc[:, batch_idxs] - np.dot(batch_design.iloc[batch_idxs], gamma_star).T ) bayesdata.iloc[:, batch_idxs] = numer / denom vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1)) bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean #online adaptation of a second dataframe if isinstance(data2, pd.DataFrame): #initialise variables model2 = adata2.obs[[key2]] batch_items2 = model2.groupby(key2).groups.items() batch_levels2, batch_info2 = zip(*batch_items2) which_batches2 = np.in1d(batch_levels, batch_levels2) n_batches = np.array([len(v) for v in batch_info2]) n_array = float(sum(n_batches)) idx = np.flatnonzero(np.invert(which_batches2)) # get empty levels non_idx =np.flatnonzero(which_batches2) for j in reversed(idx): #print(j) del batch_info[j] #remove empty levels del batch_levels[j] #remove empty levels #n_batch = len(batch_info2) #n_batches = n_batches[non_idx] #remove empty levels # drop intercept and create design matrix drop_cols = [cname for cname, inter in ((model2 == 1).all()).iteritems() if inter == True] drop_idxs = [list(model.columns).index(cdrop) for cdrop in drop_cols] model2 = model2[[c for c in model2.columns if not c in drop_cols]] numerical_covariates = [] design = _design_matrix(model2, key2, batch_levels2) batch_design = design[design.columns[:n_batch]] #pre-process data logg.info(""Standardizing additional Data across genes.\n"") stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array)))) vpsq = np.dot(np.sqrt(var_pooled).reshape((len(var_pooled), 1)), np.ones((1, int(n_array)))) s_data = ((data2 - stand_mean) / vpsq) # select the correct gamma_star and delta_star columns gamma_star_sub = gamma_star[non_idx,:] delta_star_sub = delta_star[non_idx,:] new_bayes = s_data #correct data logg.info(""Adjusting additional data\n"") for j, batch_idxs in enumerate(batch_info2): dsq = np.sqrt(delta_star_sub[j,:]) dsq = dsq.reshape((len(dsq), 1)) denom = np.dot(dsq, np.ones((1, n_batches[j]))) numer = np.array(new_bayes[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star_sub).T) del dsq new_bayes[batch_idxs] = numer / denom new_bayes = new_bayes * vpsq + stand_mean bayesdata = np.concatenate([bayesdata, new_bayes], axis=1) #column bind 
# put back into the adata object or return if inplace and not isinstance(data2, pd.DataFrame): adata.X = bayesdata.values.transpose() elif isinstance(data2, pd.DataFrame): return bayesdata else: return bayesdata.values.transpose() ","def combat( adata: AnnData, key: str = 'batch', covariates: Optional[Collection[str]] = None, inplace: bool = True, adata2: AnnData = None, key2: str = 'batch', ) -> Union[AnnData, np.ndarray, None]: """"""\ ComBat function for batch effect correction [Johnson07]_ [Leek12]_ [Pedersen12]_. Corrects for batch effects by fitting linear models, gains statistical power via an EB framework where information is borrowed across genes. This uses the implementation `combat.py`_ [Pedersen12]_ and the adjustment for control cells to account for different composition [Boettcher20]_ . .. _combat.py: https://github.com/brentp/combat.py Parameters ---------- adata Annotated data matrix key Key to a categorical annotation from :attr:`~anndata.AnnData.obs` that will be used for batch effect removal. covariates Additional covariates besides the batch variable such as adjustment variables or biological condition. This parameter refers to the design matrix `X` in Equation 2.1 in [Johnson07]_ and to the `mod` argument in the original combat function in the sva R package. Note that not including covariates may introduce bias or lead to the removal of biological signal in unbalanced designs. inplace Whether to replace adata.X or to return the corrected data adata2 Annotated data matrix where the same correction will be applied as for adata. This is particularly useful to account for compositional changes in a dataset or to compute correction factors on a reference subset and apply them to all data. key2 Key to a categorical annotation from :attr:`~anndata.AnnData.obs` that will be used for batch effect removal. Has to contain at most as many categories as the key parameter. Returns ------- Depending on the value of `inplace`, either returns the corrected matrix or or modifies `adata.X`. 
"""""" # check the input if key not in adata.obs_keys(): raise ValueError('Could not find the key {!r} in adata.obs'.format(key)) if covariates is not None: cov_exist = np.isin(covariates, adata.obs_keys()) if np.any(~cov_exist): missing_cov = np.array(covariates)[~cov_exist].tolist() raise ValueError( 'Could not find the covariate(s) {!r} in adata.obs'.format(missing_cov) ) if key in covariates: raise ValueError('Batch key and covariates cannot overlap.') if len(covariates) != len(set(covariates)): raise ValueError('Covariates must be unique.') # only works on dense matrices so far if issparse(adata.X): X = adata.X.A.T else: X = adata.X.T data = pd.DataFrame(data=X, index=adata.var_names, columns=adata.obs_names,) sc.utils.sanitize_anndata(adata) #check if adata2 is present and meaningful format if adata2 is not None: if key2 not in adata2.obs_keys(): raise ValueError('Could not find the key {!r} in adata2.obs'.format(key)) if covariates is not None: if key2 in covariates: raise ValueError('Batch key and covariates cannot overlap.') # only works on dense matrices so far data2 = adata2.to_df().T sc.utils.sanitize_anndata(adata2) # construct a pandas series of the batch annotation model = adata.obs[[key] + (covariates if covariates else [])] batch_info = model.groupby(key).indices.values() n_batch = len(batch_info) n_batches = np.array([len(v) for v in batch_info]) n_array = float(sum(n_batches)) # standardize across genes using a pooled variance estimator logg.info(""Standardizing Data across genes.\n"") s_data, design, var_pooled, stand_mean, grand_mean = _standardize_data(model, data, key) # fitting the parameters on the standardized data logg.info(""Fitting L/S model and finding priors\n"") batch_design = design[design.columns[:n_batch]] # first estimate of the additive batch effect gamma_hat = ( la.inv(batch_design.T @ batch_design) @ batch_design.T @ s_data.T ).values delta_hat = [] # first estimate for the multiplicative batch effect for i, batch_idxs in enumerate(batch_info): delta_hat.append(s_data.iloc[:, batch_idxs].var(axis=1)) # empirically fix the prior hyperparameters gamma_bar = gamma_hat.mean(axis=1) t2 = gamma_hat.var(axis=1) # a_prior and b_prior are the priors on lambda and theta from Johnson and Li (2006) a_prior = list(map(_aprior, delta_hat)) b_prior = list(map(_bprior, delta_hat)) logg.info(""Finding parametric adjustments\n"") # gamma star and delta star will be our empirical bayes (EB) estimators # for the additive and multiplicative batch effect per batch and cell gamma_star, delta_star = [], [] for i, batch_idxs in enumerate(batch_info): # temp stores our estimates for the batch effect parameters. 
# temp[0] is the additive batch effect # temp[1] is the multiplicative batch effect gamma, delta = _it_sol( s_data.iloc[:, batch_idxs].values, gamma_hat[i], delta_hat[i].values, gamma_bar[i], t2[i], a_prior[i], b_prior[i], ) gamma_star.append(gamma) delta_star.append(delta) logg.info(""Adjusting data\n"") bayesdata = s_data gamma_star = np.array(gamma_star) delta_star = np.array(delta_star) # we now apply the parametric adjustment to the standardized data from above # loop over all batches in the data for j, batch_idxs in enumerate(batch_info): # we basically substract the additive batch effect, rescale by the ratio # of multiplicative batch effect to pooled variance and add the overall gene # wise mean dsq = np.sqrt(delta_star[j, :]) dsq = dsq.reshape((len(dsq), 1)) denom = np.dot(dsq, np.ones((1, n_batches[j]))) numer = np.array( bayesdata.iloc[:, batch_idxs] - np.dot(batch_design.iloc[batch_idxs], gamma_star).T ) bayesdata.iloc[:, batch_idxs] = numer / denom vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1)) bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean #online adaptation of a second dataframe if isinstance(data2, pd.DataFrame): #initialise variables model2 = adata2.obs[[key2]] batch_items2 = model2.groupby(key2).groups.items() batch_levels2, batch_info2 = zip(*batch_items2) which_batches2 = np.in1d(batch_levels, batch_levels2) n_batches = np.array([len(v) for v in batch_info2]) n_array = float(sum(n_batches)) idx = np.flatnonzero(np.invert(which_batches2)) # get empty levels non_idx =np.flatnonzero(which_batches2) for j in reversed(idx): #print(j) del batch_info[j] #remove empty levels del batch_levels[j] #remove empty levels #n_batch = len(batch_info2) #n_batches = n_batches[non_idx] #remove empty levels # drop intercept and create design matrix drop_cols = [cname for cname, inter in ((model2 == 1).all()).iteritems() if inter == True] drop_idxs = [list(model.columns).index(cdrop) for cdrop in drop_cols] model2 = model2[[c for c in model2.columns if not c in drop_cols]] numerical_covariates = [] design = _design_matrix(model2, key2, batch_levels2) batch_design = design[design.columns[:n_batch]] #pre-process data logg.info(""Standardizing additional Data across genes.\n"") stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array)))) vpsq = np.dot(np.sqrt(var_pooled).reshape((len(var_pooled), 1)), np.ones((1, int(n_array)))) s_data = ((data2 - stand_mean) / vpsq) # select the correct gamma_star and delta_star columns gamma_star_sub = gamma_star[non_idx,:] delta_star_sub = delta_star[non_idx,:] new_bayes = s_data #correct data logg.info(""Adjusting additional data\n"") for j, batch_idxs in enumerate(batch_info2): dsq = np.sqrt(delta_star_sub[j,:]) dsq = dsq.reshape((len(dsq), 1)) denom = np.dot(dsq, np.ones((1, n_batches[j]))) numer = np.array(new_bayes[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star_sub).T) del dsq new_bayes[batch_idxs] = numer / denom new_bayes = new_bayes * vpsq + stand_mean bayesdata = np.concatenate([bayesdata, new_bayes], axis=1) #column bind # put back into the adata object or return if inplace and not isinstance(data2, pd.DataFrame): adata.X = bayesdata.values.transpose() elif isinstance(data2, pd.DataFrame): return bayesdata else: return bayesdata.values.transpose() " 44246,"def mutual_info(wires0, wires1, log_base=None): r""""""Mutual information between the subsystems prior to measurement: .. 
math:: I(A, B) = S(\rho^A) + S(\rho^B) - S(\rho^{AB}) where :math:`S` is the von Neumann entropy. The mutual information is a measure of correlation between two subsystems. More specifically, it quantifies the amount of information obtained about one system by measuring the other system. Args: wires0 (Sequence[int] or int): the wires of the first subsystem wires1 (Sequence[int] or int): the wires of the second subsystem log_base (float): Base for the logarithm. If None, the natural logarithm is used. **Example:** .. code-block:: python3 dev = qml.device(""default.qubit"", wires=2) @qml.qnode(dev) def circuit_mutual(x): qml.IsingXX(x, wires=[0, 1]) return qml.mutual_info(wires0=[0], wires1=[1]) Executing this QNode: >>> circuit_mutual(np.pi/2) 1.3862943611198906 It is also possible to get the gradient of the previous QNode: >>> param = pennylane.numpy.array(np.pi/4, requires_grad=True) >>> qml.grad(circuit_mutual)(param) 1.2464504802804612 .. note:: Calculating the derivative of :func:`~.mutual_info` is currently supported when using the classical backpropagation differentiation method (``diff_method=""backprop""``) with a compatible device and finite differences (``diff_method=""finite-diff""``). .. seealso:: :func:`~.vn_entropy` """""" # the subsystems cannot overlap if len([wire for wire in wires0 if wire in wires1]) > 0: raise qml.QuantumFunctionError( ""Subsystems for computing mutual information must not overlap."" ) wires0 = qml.wires.Wires(wires0) wires1 = qml.wires.Wires(wires1) return MeasurementProcess(MutualInfo, wires=[wires0, wires1], log_base=log_base) ","def mutual_info(wires0, wires1, log_base=None): r""""""Mutual information between the subsystems prior to measurement: .. math:: I(A, B) = S(\rho^A) + S(\rho^B) - S(\rho^{AB}) where :math:`S` is the von Neumann entropy. The mutual information is a measure of correlation between two subsystems. More specifically, it quantifies the amount of information obtained about one system by measuring the other system. Args: wires0 (Sequence[int] or int): the wires of the first subsystem wires1 (Sequence[int] or int): the wires of the second subsystem log_base (float): Base for the logarithm. If None, the natural logarithm is used. **Example:** .. code-block:: python3 dev = qml.device(""default.qubit"", wires=2) @qml.qnode(dev) def circuit_mutual(x): qml.IsingXX(x, wires=[0, 1]) return qml.mutual_info(wires0=[0], wires1=[1]) Executing this QNode: >>> circuit_mutual(np.pi/2) 1.3862943611198906 It is also possible to get the gradient of the previous QNode: >>> param = np.array(np.pi/4, requires_grad=True) >>> qml.grad(circuit_mutual)(param) 1.2464504802804612 .. note:: Calculating the derivative of :func:`~.mutual_info` is currently supported when using the classical backpropagation differentiation method (``diff_method=""backprop""``) with a compatible device and finite differences (``diff_method=""finite-diff""``). .. 
seealso:: :func:`~.vn_entropy` """""" # the subsystems cannot overlap if len([wire for wire in wires0 if wire in wires1]) > 0: raise qml.QuantumFunctionError( ""Subsystems for computing mutual information must not overlap."" ) wires0 = qml.wires.Wires(wires0) wires1 = qml.wires.Wires(wires1) return MeasurementProcess(MutualInfo, wires=[wires0, wires1], log_base=log_base) " 57847,"def dig_result(server: str, name: str): try: if server: server = f""@{server}"" dig_output = subprocess.check_output( ['dig', server, name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find A record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=False) return {""name"": name, ""resolvedaddresses"": resolved_addresses, ""nameserver"": dns_server} else: dig_output = subprocess.check_output( ['dig', name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find A record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=False) return {""name"": name, ""resolvedaddresses"": resolved_addresses, ""nameserver"": dns_server} except Exception as e: if isinstance(e, subprocess.CalledProcessError): msg = e.output # pylint: disable=no-member else: msg = str(e) return_error(msg) ","def dig_result(server: str, name: str): try: if server: server = f""@{server}"" dig_output = subprocess.check_output( ['dig', server, name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find A record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=False) return {""name"": name, ""resolvedaddresses"": resolved_addresses, ""nameserver"": dns_server} else: dig_output = subprocess.check_output( ['dig', name, '+short', '+identify'], stderr=subprocess.STDOUT, universal_newlines=True ) if not dig_output: raise ValueError(""Couldn't find A record for:\n"" + name) resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=False) return {""name"": name, ""resolvedaddresses"": resolved_addresses, ""nameserver"": dns_server} except subprocess.CalledProcessError as e: return_error(e.output) " 58302,"def install_packages_dnf( config: MkosiConfig, state: MkosiState, packages: Set[str], ) -> None: packages = make_rpm_list(config, state, packages) invoke_dnf(config, state.root, 'install', packages) ","def install_packages_dnf( config: MkosiConfig, state: MkosiState, packages: Set[str], ) -> None: packages = make_rpm_list(config, state, packages) invoke_dnf(config, state.root, 'install', packages) " 47963,"def main(): parser = argparse.ArgumentParser() parser.add_argument('-d', '--download_dir', type=Path, metavar='DIR', default=Path.cwd(), help='root of the directory tree with downloaded model files') parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR', help='root of the directory tree to place converted files into') parser.add_argument('--name', metavar='PAT[,PAT...]', help='convert only models whose names match at least one of the specified patterns') parser.add_argument('--list', type=Path, metavar='FILE.LST', help='convert only models whose names match at least one of the patterns in the specified file') parser.add_argument('--all', action='store_true', help='convert all available models') parser.add_argument('--print_all', action='store_true', help='print all available models') 
parser.add_argument('--precisions', metavar='PREC[,PREC...]', help='run only conversions that produce models with the specified precisions') parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable, help='Python executable to run Model Optimizer with') parser.add_argument('--mo', type=Path, metavar='MO.PY', help='Model Optimizer entry point script') parser.add_argument('--add_mo_arg', dest='extra_mo_args', metavar='ARG', action='append', help='Extra argument to pass to Model Optimizer') parser.add_argument('--dry_run', action='store_true', help='Print the conversion commands without running them') parser.add_argument('-j', '--jobs', type=num_jobs_arg, default=1, help='number of conversions to run concurrently') # aliases for backwards compatibility parser.add_argument('--add-mo-arg', dest='extra_mo_args', action='append', help=argparse.SUPPRESS) parser.add_argument('--dry-run', action='store_true', help=argparse.SUPPRESS) args = parser.parse_args() mo_path = args.mo if mo_path is None: try: mo_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/model_optimizer/mo.py' # For OpenVINO from pip if mo_path is None: import mo mo_path = Path(mo.__file__).parents[2] / 'mo.py' except Exception: sys.exit('Unable to locate Model Optimizer. ' + 'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.') extra_mo_args = args.extra_mo_args or [] if args.precisions is None: requested_precisions = common.KNOWN_PRECISIONS else: requested_precisions = set(args.precisions.split(',')) unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS if unknown_precisions: sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions)))) models = common.load_models_from_args(parser, args) output_dir = args.download_dir if args.output_dir is None else args.output_dir def convert(reporter, model): if model.mo_args is None: reporter.print_section_heading('Skipping {} (no conversions defined)', model.name) reporter.print() return True model_precisions = requested_precisions & model.precisions if not model_precisions: reporter.print_section_heading('Skipping {} (all conversions skipped)', model.name) reporter.print() return True (output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True) if not run_pre_convert(reporter, model, output_dir, args): return False model_format = model.framework template_variables = dict( config_dir=common.MODEL_ROOT / model.subdirectory, conv_dir=output_dir / model.subdirectory, dl_dir=args.download_dir / model.subdirectory, mo_dir=mo_path.parent, ) if model.conversion_to_onnx_args: if not convert_to_onnx(reporter, model, output_dir, args, template_variables): return False model_format = 'onnx' expanded_mo_args = [ string.Template(arg).substitute(template_variables) for arg in model.mo_args] for model_precision in sorted(model_precisions): data_type = model_precision.split('-')[0] mo_cmd = [str(args.python), '--', str(mo_path), '--framework={}'.format(model_format), '--data_type={}'.format(data_type), '--output_dir={}'.format(output_dir / model.subdirectory / model_precision), '--model_name={}'.format(model.name), *expanded_mo_args, *extra_mo_args] reporter.print_section_heading('{}Converting {} to IR ({})', '(DRY RUN) ' if args.dry_run else '', model.name, model_precision) reporter.print('Conversion command: {}', common.command_string(mo_cmd)) if not args.dry_run: reporter.print(flush=True) if not reporter.job_context.subprocess(mo_cmd): return False reporter.print() return True reporter = 
common.Reporter(common.DirectOutputContext()) if args.jobs == 1 or args.dry_run: results = [convert(reporter, model) for model in models] else: results = common.run_in_parallel(args.jobs, lambda context, model: convert(common.Reporter(context), model), models) failed_models = [model.name for model, successful in zip(models, results) if not successful] if failed_models: reporter.print('FAILED:') for failed_model_name in failed_models: reporter.print(failed_model_name) sys.exit(1) ","def main(): parser = argparse.ArgumentParser() parser.add_argument('-d', '--download_dir', type=Path, metavar='DIR', default=Path.cwd(), help='root of the directory tree with downloaded model files') parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR', help='root of the directory tree to place converted files into') parser.add_argument('--name', metavar='PAT[,PAT...]', help='convert only models whose names match at least one of the specified patterns') parser.add_argument('--list', type=Path, metavar='FILE.LST', help='convert only models whose names match at least one of the patterns in the specified file') parser.add_argument('--all', action='store_true', help='convert all available models') parser.add_argument('--print_all', action='store_true', help='print all available models') parser.add_argument('--precisions', metavar='PREC[,PREC...]', help='run only conversions that produce models with the specified precisions') parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable, help='Python executable to run Model Optimizer with') parser.add_argument('--mo', type=Path, metavar='MO.PY', help='Model Optimizer entry point script') parser.add_argument('--add_mo_arg', dest='extra_mo_args', metavar='ARG', action='append', help='Extra argument to pass to Model Optimizer') parser.add_argument('--dry_run', action='store_true', help='Print the conversion commands without running them') parser.add_argument('-j', '--jobs', type=num_jobs_arg, default=1, help='number of conversions to run concurrently') # aliases for backwards compatibility parser.add_argument('--add-mo-arg', dest='extra_mo_args', action='append', help=argparse.SUPPRESS) parser.add_argument('--dry-run', action='store_true', help=argparse.SUPPRESS) args = parser.parse_args() mo_path = args.mo if mo_path is None: try: mo_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/model_optimizer/mo.py' # For OpenVINO from PyPI if mo_path is None: import mo mo_path = Path(mo.__file__).parents[2] / 'mo.py' except Exception: sys.exit('Unable to locate Model Optimizer. 
' + 'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.') extra_mo_args = args.extra_mo_args or [] if args.precisions is None: requested_precisions = common.KNOWN_PRECISIONS else: requested_precisions = set(args.precisions.split(',')) unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS if unknown_precisions: sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions)))) models = common.load_models_from_args(parser, args) output_dir = args.download_dir if args.output_dir is None else args.output_dir def convert(reporter, model): if model.mo_args is None: reporter.print_section_heading('Skipping {} (no conversions defined)', model.name) reporter.print() return True model_precisions = requested_precisions & model.precisions if not model_precisions: reporter.print_section_heading('Skipping {} (all conversions skipped)', model.name) reporter.print() return True (output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True) if not run_pre_convert(reporter, model, output_dir, args): return False model_format = model.framework template_variables = dict( config_dir=common.MODEL_ROOT / model.subdirectory, conv_dir=output_dir / model.subdirectory, dl_dir=args.download_dir / model.subdirectory, mo_dir=mo_path.parent, ) if model.conversion_to_onnx_args: if not convert_to_onnx(reporter, model, output_dir, args, template_variables): return False model_format = 'onnx' expanded_mo_args = [ string.Template(arg).substitute(template_variables) for arg in model.mo_args] for model_precision in sorted(model_precisions): data_type = model_precision.split('-')[0] mo_cmd = [str(args.python), '--', str(mo_path), '--framework={}'.format(model_format), '--data_type={}'.format(data_type), '--output_dir={}'.format(output_dir / model.subdirectory / model_precision), '--model_name={}'.format(model.name), *expanded_mo_args, *extra_mo_args] reporter.print_section_heading('{}Converting {} to IR ({})', '(DRY RUN) ' if args.dry_run else '', model.name, model_precision) reporter.print('Conversion command: {}', common.command_string(mo_cmd)) if not args.dry_run: reporter.print(flush=True) if not reporter.job_context.subprocess(mo_cmd): return False reporter.print() return True reporter = common.Reporter(common.DirectOutputContext()) if args.jobs == 1 or args.dry_run: results = [convert(reporter, model) for model in models] else: results = common.run_in_parallel(args.jobs, lambda context, model: convert(common.Reporter(context), model), models) failed_models = [model.name for model, successful in zip(models, results) if not successful] if failed_models: reporter.print('FAILED:') for failed_model_name in failed_models: reporter.print(failed_model_name) sys.exit(1) " 17728,"def RB_to_CHARMM(c0, c1, c2, c3, c4, c5): """"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type or RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*CosPsi]^3 + c4*Cos[Psi]^4 + c5*Cos[5*Psi]^5 where Psi= t-Pi = t - 180 degress Parameters ---------- c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol) converts to: CHARMM_torsions = = K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] ) + K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) . = K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] ) + K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) . 
Returns ------- 0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : Charmm coefficients (in kcal/mol) CHARMM_ dihedral coeffs : np.matrix, shape=(6,3) Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]] (in kcal/mol) """""" # see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5 K0 = (c0 -c1 - c3 - c4/4 - c5) K1 = (+c1 + 3/4 * c3 + 5/8 * c5) K2 = (+(1/2) * c2 + 1/2 * c4) K3 = (+(1/4) * c3 + 5/16 * c5) K4 = (+(1/8) * c4) K5 = (+(1/16) * c5) n0 = 0 n1 = 1 n2 = 2 n3 = 3 n4 = 4 n5 = 5 d0 = 90 d1 = 180 d2 = 0 d3 = 180 d4 = 0 d5 = 180 return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]]) ","def RB_to_CHARMM(c0, c1, c2, c3, c4, c5): """"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type or RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*CosPsi]^3 + c4*Cos[Psi]^4 + c5*Cos[5*Psi]^5 where Psi= t-Pi = t - 180 degress Parameters ---------- c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol) converts to: CHARMM_torsions = = K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] ) + K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) . = K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] ) + K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) . Returns ------- K0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : Charmm coefficients (in kcal/mol) CHARMM_ dihedral coeffs : np.matrix, shape=(6,3) Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]] (in kcal/mol) """""" # see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5 K0 = (c0 -c1 - c3 - c4/4 - c5) K1 = (+c1 + 3/4 * c3 + 5/8 * c5) K2 = (+(1/2) * c2 + 1/2 * c4) K3 = (+(1/4) * c3 + 5/16 * c5) K4 = (+(1/8) * c4) K5 = (+(1/16) * c5) n0 = 0 n1 = 1 n2 = 2 n3 = 3 n4 = 4 n5 = 5 d0 = 90 d1 = 180 d2 = 0 d3 = 180 d4 = 0 d5 = 180 return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]]) " 57869,"def exclude_lists(original: List[dict], exclude: List[dict], key: str): """"""Exclude nodes of exclude list from the original list by key Args: original: The original list to exclude from exclude: The list of nodes to exclude key: The key to exclude by Returns: A list with the original nodes that were not excluded. """""" new_list = original.copy() exclude_keys = [excluded_node.get(key) for excluded_node in exclude] for element in original: if element.get(key) in exclude_keys: new_list.remove(element) return new_list ","def exclude_lists(original: List[dict], exclude: List[dict], key: str): """"""Exclude nodes of exclude list from the original list by key Args: original: The original list to exclude from exclude: The list of nodes to exclude key: The key to exclude by Returns: A list with the original nodes that were not excluded. """""" exclude_keys = [excluded_node.get(key) for excluded_node in exclude] return [element for element in original if element.get(key) not in exclude_keys] " 55944,"def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith("".json""): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f""Output directory ({training_args.output_dir}) already exists and is not empty. "" ""Use --overwrite_output_dir to overcome."" ) elif last_checkpoint is not None: logger.info( f""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, pass "" ""`--overwrite_output_dir` to train from scratch."" ) # Setup logging logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN, ) # Log on each process the small summary: logger.warning( f""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"" + f""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f""Training/evaluation parameters {training_args}"") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(""glue"", data_args.task_name) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {""train"": data_args.train_file, ""validation"": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. 
if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(""."")[-1] test_extension = data_args.test_file.split(""."")[-1] assert ( test_extension == train_extension ), ""`test_file` should have the same extension (csv or json) as `train_file`."" data_files[""test""] = data_args.test_file else: raise ValueError(""Need either a GLUE task or a test file for `do_predict`."") for key in data_files.keys(): logger.info(f""load a local file for {key}: {data_files[key]}"") if data_args.train_file.endswith("".csv""): # Loading a dataset from local csv files datasets = load_dataset(""csv"", data_files=data_files) else: # Loading a dataset from local json files datasets = load_dataset(""json"", data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == ""stsb"" if not is_regression: label_list = datasets[""train""].features[""label""].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = datasets[""train""].features[""label""].dtype in [""float32"", ""float64""] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = datasets[""train""].unique(""label"") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool("".ckpt"" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. 
non_label_column_names = [name for name in datasets[""train""].column_names if name != ""label""] if ""sentence1"" in non_label_column_names and ""sentence2"" in non_label_column_names: sentence1_key, sentence2_key = ""sentence1"", ""sentence2"" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = ""max_length"" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)} else: logger.warn( ""Your model seems to have been trained with labels, but they don't match the dataset: "", f""model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."" ""\nIgnoring the model labels as a result."", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=data_args.max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and ""label"" in examples: result[""label""] = [label_to_id[l] for l in examples[""label""]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) train_dataset = datasets[""train""] eval_dataset = datasets[""validation_matched"" if data_args.task_name == ""mnli"" else ""validation""] if data_args.task_name is not None or data_args.test_file is not None: test_dataset = datasets[""test_matched"" if data_args.task_name == ""mnli"" else ""test""] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f""Sample {index} of the training set: {train_dataset[index]}."") # Get the metric function if data_args.task_name is not None: metric = load_metric(""glue"", data_args.task_name) # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result[""combined_score""] = np.mean(list(result.values())).item() return result elif is_regression: return {""mse"": ((preds - p.label_ids) ** 2).mean().item()} else: return {""accuracy"": (preds == p.label_ids).astype(np.float32).mean().item()} # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. data_collator=default_data_collator if data_args.pad_to_max_length else None, ) # Training if training_args.do_train: if last_checkpoint is not None: model_path = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): model_path = model_args.model_name_or_path else: model_path = None train_result = trainer.train(model_path=model_path) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, ""train_results.txt"") if trainer.is_world_process_zero(): with open(output_train_file, ""w"") as writer: logger.info(""***** Train results *****"") for key, value in sorted(metrics.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, ""trainer_state.json"")) # Evaluation eval_results = {} if training_args.do_eval: logger.info(""*** Evaluate ***"") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == ""mnli"": tasks.append(""mnli-mm"") eval_datasets.append(datasets[""validation_mismatched""]) for eval_dataset, task in zip(eval_datasets, tasks): eval_result = trainer.evaluate(eval_dataset=eval_dataset) output_eval_file = os.path.join(training_args.output_dir, f""eval_results_{task}.txt"") if trainer.is_world_process_zero(): with open(output_eval_file, ""w"") as writer: logger.info(f""***** Eval results {task} *****"") for key, value in sorted(eval_result.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") eval_results.update(eval_result) if training_args.do_predict: logger.info(""*** Test ***"") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == ""mnli"": tasks.append(""mnli-mm"") test_datasets.append(datasets[""test_mismatched""]) for test_dataset, task in zip(test_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. 
test_dataset.remove_columns_(""label"") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f""test_results_{task}.txt"") if trainer.is_world_process_zero(): with open(output_test_file, ""w"") as writer: logger.info(f""***** Test results {task} *****"") writer.write(""index\tprediction\n"") for index, item in enumerate(predictions): if is_regression: writer.write(f""{index}\t{item:3.3f}\n"") else: item = label_list[item] writer.write(f""{index}\t{item}\n"") return eval_results ","def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith("".json""): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f""Output directory ({training_args.output_dir}) already exists and is not empty. "" ""Use --overwrite_output_dir to overcome."" ) elif last_checkpoint is not None: logger.info( f""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "" ""the `--output_dir` or add `--overwrite_output_dir` to train from scratch."" ) # Setup logging logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN, ) # Log on each process the small summary: logger.warning( f""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"" + f""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f""Training/evaluation parameters {training_args}"") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. 
You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(""glue"", data_args.task_name) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {""train"": data_args.train_file, ""validation"": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(""."")[-1] test_extension = data_args.test_file.split(""."")[-1] assert ( test_extension == train_extension ), ""`test_file` should have the same extension (csv or json) as `train_file`."" data_files[""test""] = data_args.test_file else: raise ValueError(""Need either a GLUE task or a test file for `do_predict`."") for key in data_files.keys(): logger.info(f""load a local file for {key}: {data_files[key]}"") if data_args.train_file.endswith("".csv""): # Loading a dataset from local csv files datasets = load_dataset(""csv"", data_files=data_files) else: # Loading a dataset from local json files datasets = load_dataset(""json"", data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == ""stsb"" if not is_regression: label_list = datasets[""train""].features[""label""].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = datasets[""train""].features[""label""].dtype in [""float32"", ""float64""] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = datasets[""train""].unique(""label"") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool("".ckpt"" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in datasets[""train""].column_names if name != ""label""] if ""sentence1"" in non_label_column_names and ""sentence2"" in non_label_column_names: sentence1_key, sentence2_key = ""sentence1"", ""sentence2"" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = ""max_length"" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and is_regression ): # Some have all caps in their config, some don't. 
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)} else: logger.warn( ""Your model seems to have been trained with labels, but they don't match the dataset: "", f""model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."" ""\nIgnoring the model labels as a result."", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=data_args.max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and ""label"" in examples: result[""label""] = [label_to_id[l] for l in examples[""label""]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) train_dataset = datasets[""train""] eval_dataset = datasets[""validation_matched"" if data_args.task_name == ""mnli"" else ""validation""] if data_args.task_name is not None or data_args.test_file is not None: test_dataset = datasets[""test_matched"" if data_args.task_name == ""mnli"" else ""test""] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f""Sample {index} of the training set: {train_dataset[index]}."") # Get the metric function if data_args.task_name is not None: metric = load_metric(""glue"", data_args.task_name) # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result[""combined_score""] = np.mean(list(result.values())).item() return result elif is_regression: return {""mse"": ((preds - p.label_ids) ** 2).mean().item()} else: return {""accuracy"": (preds == p.label_ids).astype(np.float32).mean().item()} # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
data_collator=default_data_collator if data_args.pad_to_max_length else None, ) # Training if training_args.do_train: if last_checkpoint is not None: model_path = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): model_path = model_args.model_name_or_path else: model_path = None train_result = trainer.train(model_path=model_path) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, ""train_results.txt"") if trainer.is_world_process_zero(): with open(output_train_file, ""w"") as writer: logger.info(""***** Train results *****"") for key, value in sorted(metrics.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, ""trainer_state.json"")) # Evaluation eval_results = {} if training_args.do_eval: logger.info(""*** Evaluate ***"") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == ""mnli"": tasks.append(""mnli-mm"") eval_datasets.append(datasets[""validation_mismatched""]) for eval_dataset, task in zip(eval_datasets, tasks): eval_result = trainer.evaluate(eval_dataset=eval_dataset) output_eval_file = os.path.join(training_args.output_dir, f""eval_results_{task}.txt"") if trainer.is_world_process_zero(): with open(output_eval_file, ""w"") as writer: logger.info(f""***** Eval results {task} *****"") for key, value in sorted(eval_result.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") eval_results.update(eval_result) if training_args.do_predict: logger.info(""*** Test ***"") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == ""mnli"": tasks.append(""mnli-mm"") test_datasets.append(datasets[""test_mismatched""]) for test_dataset, task in zip(test_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. test_dataset.remove_columns_(""label"") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f""test_results_{task}.txt"") if trainer.is_world_process_zero(): with open(output_test_file, ""w"") as writer: logger.info(f""***** Test results {task} *****"") writer.write(""index\tprediction\n"") for index, item in enumerate(predictions): if is_regression: writer.write(f""{index}\t{item:3.3f}\n"") else: item = label_list[item] writer.write(f""{index}\t{item}\n"") return eval_results " 7543,"def get_pkg_data_path(*path, package=None): """"""Make path from source-included data directories. Parameters ---------- *path : str Name/location of the desired data file/directory. May be a tuple of strings -- for ``os.path`` intelligent path joining. package : str, optional, keyword only If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. Returns ------- path : str Name/location of the desired data file/directory. """""" if package is None: module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib']) if module is None: # not called from inside an astropy package. 
So just pass name # through return os.path.join(*path) if not hasattr(module, '__package__') or not module.__package__: # The __package__ attribute may be missing or set to None; see # PEP-366, also astropy issue #1256 if '.' in module.__name__: package = module.__name__.rpartition('.')[0] else: package = module.__name__ else: package = module.__package__ else: module = resolve_name(package) rootpkgname = package.partition('.')[0] rootpkg = resolve_name(rootpkgname) module_path = os.path.dirname(module.__file__) path = os.path.join(module_path, *path) root_dir = os.path.dirname(rootpkg.__file__) if not _is_inside(path, root_dir): raise RuntimeError(f""attempted to get a local data file outside "" f""of the {rootpkgname} tree."") return path ","def get_pkg_data_path(path, package=None): """"""Make path from source-included data directories. Parameters ---------- *path : str Name/location of the desired data file/directory. May be a tuple of strings -- for ``os.path`` intelligent path joining. package : str, optional, keyword only If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. Returns ------- path : str Name/location of the desired data file/directory. """""" if package is None: module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib']) if module is None: # not called from inside an astropy package. So just pass name # through return os.path.join(*path) if not hasattr(module, '__package__') or not module.__package__: # The __package__ attribute may be missing or set to None; see # PEP-366, also astropy issue #1256 if '.' in module.__name__: package = module.__name__.rpartition('.')[0] else: package = module.__name__ else: package = module.__package__ else: module = resolve_name(package) rootpkgname = package.partition('.')[0] rootpkg = resolve_name(rootpkgname) module_path = os.path.dirname(module.__file__) path = os.path.join(module_path, *path) root_dir = os.path.dirname(rootpkg.__file__) if not _is_inside(path, root_dir): raise RuntimeError(f""attempted to get a local data file outside "" f""of the {rootpkgname} tree."") return path " 22654,"def _generate_rst(gallery_conf, fname, content): """""" Helper function returning the rst text a given example content. This writes a file gallery_conf['examples_dir']/fname with *content*, creates the corresponding rst file by running generate_file_rst() and returns the generated rest code. Parameters ---------- gallery_conf A gallery_conf as cerated by the gallery_conf fixture. fname : str A filename; e.g. 'test.py'. This is relative to gallery_conf['examples_dir'] content : str The content of fname. Returns ------- rst : str The generated rst code. """""" with codecs.open(os.path.join(gallery_conf['examples_dir'], fname), mode='w', encoding='utf-8') as f: f.write('\n'.join(content)) # generate rst file sg.generate_file_rst(fname, gallery_conf['gallery_dir'], gallery_conf['examples_dir'], gallery_conf) # read rst file and check if it contains code output rst_fname = os.path.splitext(fname)[0] + '.rst' with codecs.open(os.path.join(gallery_conf['gallery_dir'], rst_fname), mode='r', encoding='utf-8') as f: rst = f.read() return rst ","def _generate_rst(gallery_conf, fname, content): """""" Helper function returning the rst text a given example content. This writes a file gallery_conf['examples_dir']/fname with *content*, creates the corresponding rst file by running generate_file_rst() and returns the generated rest code. 
Parameters ---------- gallery_conf A gallery_conf as created by the gallery_conf fixture. fname : str A filename; e.g. 'test.py'. This is relative to gallery_conf['examples_dir'] content : str The content of fname. Returns ------- rst : str The generated rst code. """""" with codecs.open(os.path.join(gallery_conf['examples_dir'], fname), mode='w', encoding='utf-8') as f: f.write('\n'.join(content)) # generate rst file sg.generate_file_rst(fname, gallery_conf['gallery_dir'], gallery_conf['examples_dir'], gallery_conf) # read rst file and check if it contains code output rst_fname = os.path.splitext(fname)[0] + '.rst' with codecs.open(os.path.join(gallery_conf['gallery_dir'], rst_fname), mode='r', encoding='utf-8') as f: rst = f.read() return rst " 10343,"def get_agent_info_api_endpoint(info_key): return AGENT_INFO_API_MAP[info_key] if info_key in AGENT_INFO_API_MAP else info_key ","def get_agent_info_api_endpoint(info_key): return AGENT_INFO_API_MAP.get(info_key, info_key) " 2724,"def _generate_invalid_param_val_interval(interval, constraints): """"""Return a value that does not satisfy an interval constraint. Generating a invalid value for an integer interval depends on the other constraints since an int is a real, meaning that it can be valid for a real interval. Assumes that there can be at most 2 interval constraints: one integer interval and/or one real interval. This is only useful for testing purpose. Parameters ---------- interval : Interval The interval to generate a value for. constraints : list of Constraint The list of all constraints for this parameter. Returns ------- val : object A value that does not satisfy the interval constraint. """""" if interval.left is None and interval.right is None: raise NotImplementedError if interval.type is Real: # generate a non-integer value such that it can't be valid even if there's also # an integer interval constraint. if interval.left is not None: return np.floor(interval.left) - 0.5 else: # right is not None return np.ceil(interval.right) + 0.5 else: # interval.type is Integral # We need to check if there's also a real interval constraint to generate a # value that is not valid for any of the 2 interval constraints. real_intervals = [ i for i in constraints if isinstance(i, Interval) and i.type is Real ] real_interval = real_intervals[0] if real_intervals else None if real_interval is None: # Only the integer interval constraint -> easy if interval.left is not None: return interval.left - 1 else: return interval.right + 1 # There's also a real interval constraint. Try to find a value left to both or # right to both or in between them. # redefine left and right bounds to be smallest and largest valid integers in # both intervals. 
int_left = interval.left if int_left is not None and interval.closed in (""right"", ""neither""): int_left = int_left + 1 int_right = interval.right if int_right is not None and interval.closed in (""left"", ""neither""): int_right = int_right - 1 real_left = real_interval.left if real_interval.left is not None: real_left = int(np.ceil(real_interval.left)) if real_interval.closed in (""right"", ""neither""): real_left = real_left + 1 real_right = real_interval.right if real_interval.right is not None: real_right = int(np.floor(real_interval.right)) if real_interval.closed in (""left"", ""neither""): real_right = real_right - 1 if int_left is not None and real_left is not None: # there exists an int left to both intervals return min(int_left, real_left) - 1 if int_right is not None and real_right is not None: # there exists an int right to both intervals return max(int_right, real_right) + 1 if int_left is not None: if real_right is not None and int_left - real_right >= 2: # there exists an int between the 2 intervals return int_left - 1 else: raise NotImplementedError else: # int_right is not None if real_left is not None and real_left - int_right >= 2: # there exists an int between the 2 intervals return int_right + 1 else: raise NotImplementedError ","def _generate_invalid_param_val_interval(interval, constraints): """"""Return a value that does not satisfy an interval constraint. Generating an invalid value for an integer interval depends on the other constraints since an int is a real, meaning that it can be valid for a real interval. Assumes that there can be at most 2 interval constraints: one integer interval and/or one real interval. This is only useful for testing purpose. Parameters ---------- interval : Interval The interval to generate a value for. constraints : list of Constraint The list of all constraints for this parameter. Returns ------- val : object A value that does not satisfy the interval constraint. """""" if interval.left is None and interval.right is None: raise NotImplementedError if interval.type is Real: # generate a non-integer value such that it can't be valid even if there's also # an integer interval constraint. if interval.left is not None: return np.floor(interval.left) - 0.5 else: # right is not None return np.ceil(interval.right) + 0.5 else: # interval.type is Integral # We need to check if there's also a real interval constraint to generate a # value that is not valid for any of the 2 interval constraints. real_intervals = [ i for i in constraints if isinstance(i, Interval) and i.type is Real ] real_interval = real_intervals[0] if real_intervals else None if real_interval is None: # Only the integer interval constraint -> easy if interval.left is not None: return interval.left - 1 else: return interval.right + 1 # There's also a real interval constraint. Try to find a value left to both or # right to both or in between them. # redefine left and right bounds to be smallest and largest valid integers in # both intervals. 
int_left = interval.left if int_left is not None and interval.closed in (""right"", ""neither""): int_left = int_left + 1 int_right = interval.right if int_right is not None and interval.closed in (""left"", ""neither""): int_right = int_right - 1 real_left = real_interval.left if real_interval.left is not None: real_left = int(np.ceil(real_interval.left)) if real_interval.closed in (""right"", ""neither""): real_left = real_left + 1 real_right = real_interval.right if real_interval.right is not None: real_right = int(np.floor(real_interval.right)) if real_interval.closed in (""left"", ""neither""): real_right = real_right - 1 if int_left is not None and real_left is not None: # there exists an int left to both intervals return min(int_left, real_left) - 1 if int_right is not None and real_right is not None: # there exists an int right to both intervals return max(int_right, real_right) + 1 if int_left is not None: if real_right is not None and int_left - real_right >= 2: # there exists an int between the 2 intervals return int_left - 1 else: raise NotImplementedError else: # int_right is not None if real_left is not None and real_left - int_right >= 2: # there exists an int between the 2 intervals return int_right + 1 else: raise NotImplementedError " 58098,"def main() -> None: token_url = demisto.params().get('token_url') org_id = demisto.params().get('orgId') api_key = demisto.params().get('apiKey') api_secret = demisto.params().get('apiSecret') base_url = demisto.params()['url'] verify_certificate = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') commands = { 'umbrella-get-summary': get_summary_command, 'umbrella-list-top-threats': list_top_threats_command, } try: product_auth = UmbrellaAuthAPI(token_url, api_key, api_secret) access_token = product_auth.get_access_token()[""access_token""] headers: Dict = { ""Authorization"": f""Bearer {access_token}"" } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy, org_id=org_id) if command == 'test-module': # This is the call made when pressing the integration Test button. result = test_module(client) return_results(result) elif command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') ","def main() -> None: token_url = demisto.params().get('token_url') org_id = demisto.params().get('orgId') api_key = params.get('apiKey') api_secret = params.get('apiSecret') base_url = params['url'] verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') commands = { 'umbrella-get-summary': get_summary_command, 'umbrella-list-top-threats': list_top_threats_command, } try: product_auth = UmbrellaAuthAPI(token_url, api_key, api_secret) access_token = product_auth.get_access_token()[""access_token""] headers: Dict = { ""Authorization"": f""Bearer {access_token}"" } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy, org_id=org_id) if command == 'test-module': # This is the call made when pressing the integration Test button. 
result = test_module(client) return_results(result) elif command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') " 31351,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 
'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'picus-version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback 
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 32519,"def main() -> None: logging.getLogger(""argus_cli"").setLevel(""WARNING"") first_fetch_period = parse_first_fetch( demisto.params().get(""first_fetch"", ""-1 day"") ) set_argus_settings( demisto.params().get(""api_key""), demisto.params().get(""api_url""), handle_proxy(), demisto.params().get(""insecure""), ) demisto.debug(f""Command being called is {demisto.command()}"") try: if demisto.command() == ""test-module"": # This is the call made when pressing the integration Test button. return_results(test_module_command()) elif demisto.command() == ""fetch-incidents"": # Set and define the fetch incidents command to run after activated via integration settings. next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(""max_fetch"", 25), min_severity=demisto.params().get(""min_severity"", ""low"").lower(), integration_instance=demisto.integrationInstance(), mirror_direction=demisto.params().get(""mirror_direction"", ""None""), mirror_tags=demisto.params().get(""mirror_tag""), exclude_tag=demisto.params().get(""exclude_tag""), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == ""get-remote-data"": return_results( get_remote_data_command( demisto.args(), integration_instance=demisto.integrationInstance(), mirror_direction=demisto.params().get(""mirror_direction"", ""None""), mirror_tags=demisto.params().get(""mirror_tag""), ) ) if demisto.command() == ""get-modified-remote-data"": # Hotfix for mirroring issues. raise NotImplementedError elif demisto.command() == ""argus-add-attachment"": return_results(add_attachment_command(demisto.args())) elif demisto.command() == ""update-remote-system"": return_results(update_remote_system_command(demisto.args())) elif demisto.command() == ""argus-add-case-tag"": return_results(add_case_tag_command(demisto.args())) elif demisto.command() == ""argus-add-comment"": return_results(add_comment_command(demisto.args())) elif demisto.command() == ""argus-advanced-case-search"": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == ""argus-close-case"": return_results(close_case_command(demisto.args())) elif demisto.command() == ""argus-create-case"": return_results(create_case_command(demisto.args())) elif demisto.command() == ""argus-delete-case"": return_results(delete_case_command(demisto.args())) elif demisto.command() == ""argus-delete-comment"": return_results(delete_comment_command(demisto.args())) elif demisto.command() == ""argus-download-attachment"": return_results(download_attachment_command(demisto.args())) elif demisto.command() == ""argus-download-attachment-by-filename"": return_results(download_attachment_by_filename_command(demisto.args())) elif demisto.command() == ""argus-edit-comment"": return_results(edit_comment_command(demisto.args())) elif demisto.command() == ""argus-get-attachment"": return_results(get_attachment_command(demisto.args())) elif demisto.command() == ""argus-get-case-metadata-by-id"": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == ""argus-list-case-attachments"": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == ""argus-list-case-tags"": return_results(list_case_tags_command(demisto.args())) elif demisto.command() == ""argus-list-case-comments"": return_results(list_case_comments_command(demisto.args())) elif 
demisto.command() == ""argus-remove-case-tag-by-id"": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == ""argus-remove-case-tag-by-key-value"": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == ""argus-update-case"": return_results(update_case_command(demisto.args())) elif demisto.command() == ""argus-get-event"": return_results(get_event_command(demisto.args())) elif demisto.command() == ""argus-get-events-for-case"": return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == ""argus-find-aggregated-events"": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() == ""argus-list-aggregated-events"": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == ""argus-get-payload"": return_results(get_payload_command(demisto.args())) elif demisto.command() == ""argus-get-pcap"": return_results(get_pcap_command(demisto.args())) elif demisto.command() == ""argus-find-nids-events"": return_results(find_nids_events_command(demisto.args())) elif demisto.command() == ""argus-list-nids-events"": return_results(list_nids_events_command(demisto.args())) elif demisto.command() == ""argus-pdns-search-records"": return_results(search_records_command(demisto.args())) elif demisto.command() == ""argus-fetch-observations-for-domain"": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == ""argus-fetch-observations-for-ip"": return_results(fetch_observations_for_i_p_command(demisto.args())) elif demisto.command() == ""argus-print-case-comments"": return_results(print_case_comments_command(demisto.args())) elif demisto.command() == ""argus-print-case-metadata-by-id"": return_results(print_case_metadata_by_id_command(demisto.args())) elif demisto.command() == ""argus-download-case-attachments"": return_results(download_case_attachments_command(demisto.args())) # Log exceptions and return errors except AccessDeniedException as denied: demisto.info(denied.message) return_warning(denied.message) except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error( f""Failed to execute {demisto.command()} command.\nError:\n{str(e)}"" ) ","def main() -> None: logging.getLogger(""argus_cli"").setLevel(""WARNING"") first_fetch_period = parse_first_fetch( demisto.params().get(""first_fetch"", ""-1 day"") ) set_argus_settings( demisto.params().get(""api_key""), demisto.params().get(""api_url""), handle_proxy(), demisto.params().get(""insecure""), ) demisto.debug(f""Command being called is {demisto.command()}"") try: if demisto.command() == ""test-module"": # This is the call made when pressing the integration Test button. return_results(test_module_command()) elif demisto.command() == ""fetch-incidents"": # Set and define the fetch incidents command to run after activated via integration settings. 
next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(""max_fetch"", 25), min_severity=demisto.params().get(""min_severity"", ""low"").lower(), integration_instance=demisto.integrationInstance(), mirror_direction=demisto.params().get(""mirror_direction"", ""None""), mirror_tags=demisto.params().get(""mirror_tag""), exclude_tag=demisto.params().get(""exclude_tag""), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == ""get-remote-data"": return_results( get_remote_data_command( demisto.args(), integration_instance=demisto.integrationInstance(), mirror_direction=demisto.params().get(""mirror_direction"", ""None""), mirror_tags=demisto.params().get(""mirror_tag""), ) ) if demisto.command() == ""get-modified-remote-data"": # Hotfix for mirroring issues. raise NotImplementedError(f'The ""get-modified-remote-data"" command is not implemented') elif demisto.command() == ""argus-add-attachment"": return_results(add_attachment_command(demisto.args())) elif demisto.command() == ""update-remote-system"": return_results(update_remote_system_command(demisto.args())) elif demisto.command() == ""argus-add-case-tag"": return_results(add_case_tag_command(demisto.args())) elif demisto.command() == ""argus-add-comment"": return_results(add_comment_command(demisto.args())) elif demisto.command() == ""argus-advanced-case-search"": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == ""argus-close-case"": return_results(close_case_command(demisto.args())) elif demisto.command() == ""argus-create-case"": return_results(create_case_command(demisto.args())) elif demisto.command() == ""argus-delete-case"": return_results(delete_case_command(demisto.args())) elif demisto.command() == ""argus-delete-comment"": return_results(delete_comment_command(demisto.args())) elif demisto.command() == ""argus-download-attachment"": return_results(download_attachment_command(demisto.args())) elif demisto.command() == ""argus-download-attachment-by-filename"": return_results(download_attachment_by_filename_command(demisto.args())) elif demisto.command() == ""argus-edit-comment"": return_results(edit_comment_command(demisto.args())) elif demisto.command() == ""argus-get-attachment"": return_results(get_attachment_command(demisto.args())) elif demisto.command() == ""argus-get-case-metadata-by-id"": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == ""argus-list-case-attachments"": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == ""argus-list-case-tags"": return_results(list_case_tags_command(demisto.args())) elif demisto.command() == ""argus-list-case-comments"": return_results(list_case_comments_command(demisto.args())) elif demisto.command() == ""argus-remove-case-tag-by-id"": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == ""argus-remove-case-tag-by-key-value"": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == ""argus-update-case"": return_results(update_case_command(demisto.args())) elif demisto.command() == ""argus-get-event"": return_results(get_event_command(demisto.args())) elif demisto.command() == ""argus-get-events-for-case"": return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == ""argus-find-aggregated-events"": return_results(find_aggregated_events_command(demisto.args())) elif 
demisto.command() == ""argus-list-aggregated-events"": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == ""argus-get-payload"": return_results(get_payload_command(demisto.args())) elif demisto.command() == ""argus-get-pcap"": return_results(get_pcap_command(demisto.args())) elif demisto.command() == ""argus-find-nids-events"": return_results(find_nids_events_command(demisto.args())) elif demisto.command() == ""argus-list-nids-events"": return_results(list_nids_events_command(demisto.args())) elif demisto.command() == ""argus-pdns-search-records"": return_results(search_records_command(demisto.args())) elif demisto.command() == ""argus-fetch-observations-for-domain"": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == ""argus-fetch-observations-for-ip"": return_results(fetch_observations_for_i_p_command(demisto.args())) elif demisto.command() == ""argus-print-case-comments"": return_results(print_case_comments_command(demisto.args())) elif demisto.command() == ""argus-print-case-metadata-by-id"": return_results(print_case_metadata_by_id_command(demisto.args())) elif demisto.command() == ""argus-download-case-attachments"": return_results(download_case_attachments_command(demisto.args())) # Log exceptions and return errors except AccessDeniedException as denied: demisto.info(denied.message) return_warning(denied.message) except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error( f""Failed to execute {demisto.command()} command.\nError:\n{str(e)}"" ) " 17722,"def to_pybel(compound, box=None, title='', residues=None, show_ports=False, infer_residues=False): """""" Create a pybel.Molecule from a Compound Parameters ---------- compound : mb.Compound The mbuild Compound that need to be converted. box : mb.Box, optional, default=None title : str, optional, default=compound.name Title/name of the ParmEd Structure residues : str of list of str Labels of residues in the Compound. Residues are assigned by checking against Compound.name. show_ports : boolean, optional, default=False Include all port atoms when converting to a `Structure`. 
infer_residues : bool, optional, default=False Attempt to assign residues based on names of children Return ------ pybelmol : pybel.Molecule Notes ----- Most of the mb.Compound is first converted to openbabel.OBMol And then pybel creates a pybel.Molecule from the OBMol Bond orders are assumed to be 1 OBMol atom indexing starts at 1, with spatial dimension Angstrom """""" openbabel = import_('openbabel') pybel = import_('pybel') mol = openbabel.OBMol() particle_to_atom_index = {} if not residues and infer_residues: residues = list(set([child.name for child in compound.children])) if isinstance(residues, str): residues = [residues] if isinstance(residues, (list, set)): residues = tuple(residues) compound_residue_map = dict() atom_residue_map = dict() for i, part in enumerate(compound.particles(include_ports=show_ports)): if residues and part.name in residues: current_residue = mol.NewResidue() current_residue.SetName(part.name) atom_residue_map[part] = current_residue compound_residue_map[part] = current_residue elif residues: for parent in part.ancestors(): if residues and parent.name in residues: if parent not in compound_residue_map: current_residue = mol.NewResidue() current_residue.SetName(parent.name) compound_residue_map[parent] = current_residue atom_residue_map[part] = current_residue break else: # Did not find specified residues in ancestors. current_residue = mol.NewResidue() current_residue.SetName(""RES"") atom_residue_map[part] = current_residue else: current_residue = mol.NewResidue() current_residue.SetName(""RES"") atom_residue_map[part] = current_residue temp = mol.NewAtom() residue = atom_residue_map[part] temp.SetResidue(residue) if part.port_particle: temp.SetAtomicNum(0) else: try: temp.SetAtomicNum(AtomicNum[part.name.capitalize()]) except KeyError: warn(""Could not infer atomic number from "" ""{}, setting to 0"".format(part.name)) temp.SetAtomicNum(0) temp.SetVector(*(part.xyz[0]*10)) particle_to_atom_index[part] = i ucell = openbabel.OBUnitCell() if box is None: box = compound.boundingbox a, b, c = 10.0 * box.lengths alpha, beta, gamma = np.radians(box.angles) cosa = np.cos(alpha) cosb = np.cos(beta) sinb = np.sin(beta) cosg = np.cos(gamma) sing = np.sin(gamma) mat_coef_y = (cosa - cosb * cosg) / sing mat_coef_z = np.power(sinb, 2, dtype=float) - \ np.power(mat_coef_y, 2, dtype=float) if mat_coef_z > 0.: mat_coef_z = np.sqrt(mat_coef_z) else: raise Warning('Non-positive z-vector. Angles {} ' 'do not generate a box with the z-vector in the' 'positive z direction'.format(box.angles)) box_vec = [[1, 0, 0], [cosg, sing, 0], [cosb, mat_coef_y, mat_coef_z]] box_vec = np.asarray(box_vec) box_mat = (np.array([a,b,c])* box_vec.T).T first_vector = openbabel.vector3(*box_mat[0]) second_vector = openbabel.vector3(*box_mat[1]) third_vector = openbabel.vector3(*box_mat[2]) ucell.SetData(first_vector, second_vector, third_vector) mol.CloneData(ucell) for bond in compound.bonds(): bond_order = 1 mol.AddBond(particle_to_atom_index[bond[0]]+1, particle_to_atom_index[bond[1]]+1, bond_order) pybelmol = pybel.Molecule(mol) pybelmol.title = title if title else compound.name return pybelmol ","def to_pybel(compound, box=None, title='', residues=None, show_ports=False, infer_residues=False): """""" Create a pybel.Molecule from a Compound Parameters ---------- compound : mb.Compound The mbuild Compound that need to be converted. 
box : mb.Box, optional, default=None title : str, optional, default=compound.name Title/name of the ParmEd Structure residues : str of list of str Labels of residues in the Compound. Residues are assigned by checking against Compound.name. show_ports : boolean, optional, default=False Include all port atoms when converting to a `Structure`. infer_residues : bool, optional, default=False Attempt to assign residues based on names of children Returns ------ pybelmol : pybel.Molecule Notes ----- Most of the mb.Compound is first converted to openbabel.OBMol And then pybel creates a pybel.Molecule from the OBMol Bond orders are assumed to be 1 OBMol atom indexing starts at 1, with spatial dimension Angstrom """""" openbabel = import_('openbabel') pybel = import_('pybel') mol = openbabel.OBMol() particle_to_atom_index = {} if not residues and infer_residues: residues = list(set([child.name for child in compound.children])) if isinstance(residues, str): residues = [residues] if isinstance(residues, (list, set)): residues = tuple(residues) compound_residue_map = dict() atom_residue_map = dict() for i, part in enumerate(compound.particles(include_ports=show_ports)): if residues and part.name in residues: current_residue = mol.NewResidue() current_residue.SetName(part.name) atom_residue_map[part] = current_residue compound_residue_map[part] = current_residue elif residues: for parent in part.ancestors(): if residues and parent.name in residues: if parent not in compound_residue_map: current_residue = mol.NewResidue() current_residue.SetName(parent.name) compound_residue_map[parent] = current_residue atom_residue_map[part] = current_residue break else: # Did not find specified residues in ancestors. current_residue = mol.NewResidue() current_residue.SetName(""RES"") atom_residue_map[part] = current_residue else: current_residue = mol.NewResidue() current_residue.SetName(""RES"") atom_residue_map[part] = current_residue temp = mol.NewAtom() residue = atom_residue_map[part] temp.SetResidue(residue) if part.port_particle: temp.SetAtomicNum(0) else: try: temp.SetAtomicNum(AtomicNum[part.name.capitalize()]) except KeyError: warn(""Could not infer atomic number from "" ""{}, setting to 0"".format(part.name)) temp.SetAtomicNum(0) temp.SetVector(*(part.xyz[0]*10)) particle_to_atom_index[part] = i ucell = openbabel.OBUnitCell() if box is None: box = compound.boundingbox a, b, c = 10.0 * box.lengths alpha, beta, gamma = np.radians(box.angles) cosa = np.cos(alpha) cosb = np.cos(beta) sinb = np.sin(beta) cosg = np.cos(gamma) sing = np.sin(gamma) mat_coef_y = (cosa - cosb * cosg) / sing mat_coef_z = np.power(sinb, 2, dtype=float) - \ np.power(mat_coef_y, 2, dtype=float) if mat_coef_z > 0.: mat_coef_z = np.sqrt(mat_coef_z) else: raise Warning('Non-positive z-vector. 
Angles {} ' 'do not generate a box with the z-vector in the' 'positive z direction'.format(box.angles)) box_vec = [[1, 0, 0], [cosg, sing, 0], [cosb, mat_coef_y, mat_coef_z]] box_vec = np.asarray(box_vec) box_mat = (np.array([a,b,c])* box_vec.T).T first_vector = openbabel.vector3(*box_mat[0]) second_vector = openbabel.vector3(*box_mat[1]) third_vector = openbabel.vector3(*box_mat[2]) ucell.SetData(first_vector, second_vector, third_vector) mol.CloneData(ucell) for bond in compound.bonds(): bond_order = 1 mol.AddBond(particle_to_atom_index[bond[0]]+1, particle_to_atom_index[bond[1]]+1, bond_order) pybelmol = pybel.Molecule(mol) pybelmol.title = title if title else compound.name return pybelmol " 23256,"def test_domain_cpp_ast_attributes(): # style: C++ check('member', '[[]] int f', {1: 'f__i', 2: '1f'}) check('member', '[ [ ] ] int f', {1: 'f__i', 2: '1f'}, # this will fail when the proper grammar is implemented output='[[ ]] int f') check('member', '[[a]] int f', {1: 'f__i', 2: '1f'}) # style: GNU check('member', '__attribute__(()) int f', {1: 'f__i', 2: '1f'}) check('member', '__attribute__((a)) int f', {1: 'f__i', 2: '1f'}) check('member', '__attribute__((a, b)) int f', {1: 'f__i', 2: '1f'}) check('member', '__attribute__((optimize(3))) int f', {1: 'f__i', 2: '1f'}) check('member', '__attribute__((format(printf, 1, 2))) int f', {1: 'f__i', 2: '1f'}) # style: user-defined id check('member', 'id_attr int f', {1: 'f__i', 2: '1f'}) # style: user-defined paren check('member', 'paren_attr() int f', {1: 'f__i', 2: '1f'}) check('member', 'paren_attr(a) int f', {1: 'f__i', 2: '1f'}) check('member', 'paren_attr("""") int f', {1: 'f__i', 2: '1f'}) check('member', 'paren_attr(()[{}][]{}) int f', {1: 'f__i', 2: '1f'}) with pytest.raises(DefinitionError): parse('member', 'paren_attr(() int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr([) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr({) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr([)]) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr((])) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr({]}) int f') # position: decl specs check('function', 'static inline __attribute__(()) void f()', {1: 'f', 2: '1fv'}, output='__attribute__(()) static inline void f()') check('function', '[[attr1]] [[attr2]] void f()', {1: 'f', 2: '1fv'}) # position: declarator check('member', 'int *[[attr]] i', {1: 'i__iP', 2: '1i'}) check('member', 'int *const [[attr]] volatile i', {1: 'i__iPVC', 2: '1i'}, output='int *[[attr]] volatile const i') check('member', 'int &[[attr]] i', {1: 'i__iR', 2: '1i'}) check('member', 'int *[[attr]] *i', {1: 'i__iPP', 2: '1i'}) # position: parameters and qualifiers check('function', 'void f() [[attr1]] [[attr2]]', {1: 'f', 2: '1fv'}) # position: class check('class', '{key}[[nodiscard]] Foo', {1: 'Foo', 2: '3Foo', 3: '3Foo', 4: '3Foo'}, key='class') check('union', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo', 3: '3Foo', 4: '3Foo'}, key='union') # position: enum check('enum', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo', 3: '3Foo', 4: '3Foo'}, key='enum') ","def test_domain_cpp_ast_attributes(): # style: C++ check('member', '[[]] int f', {1: 'f__i', 2: '1f'}) check('member', '[ [ ] ] int f', {1: 'f__i', 2: '1f'}, # this will fail when the proper grammar is implemented output='[[ ]] int f') check('member', '[[a]] int f', {1: 'f__i', 2: '1f'}) # style: GNU check('member', '__attribute__(()) int f', {1: 'f__i', 2: '1f'}) 
check('member', '__attribute__((a)) int f', {1: 'f__i', 2: '1f'}) check('member', '__attribute__((a, b)) int f', {1: 'f__i', 2: '1f'}) check('member', '__attribute__((optimize(3))) int f', {1: 'f__i', 2: '1f'}) check('member', '__attribute__((format(printf, 1, 2))) int f', {1: 'f__i', 2: '1f'}) # style: user-defined id check('member', 'id_attr int f', {1: 'f__i', 2: '1f'}) # style: user-defined paren check('member', 'paren_attr() int f', {1: 'f__i', 2: '1f'}) check('member', 'paren_attr(a) int f', {1: 'f__i', 2: '1f'}) check('member', 'paren_attr("""") int f', {1: 'f__i', 2: '1f'}) check('member', 'paren_attr(()[{}][]{}) int f', {1: 'f__i', 2: '1f'}) with pytest.raises(DefinitionError): parse('member', 'paren_attr(() int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr([) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr({) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr([)]) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr((])) int f') with pytest.raises(DefinitionError): parse('member', 'paren_attr({]}) int f') # position: decl specs check('function', 'static inline __attribute__(()) void f()', {1: 'f', 2: '1fv'}, output='__attribute__(()) static inline void f()') check('function', '[[attr1]] [[attr2]] void f()', {1: 'f', 2: '1fv'}) # position: declarator check('member', 'int *[[attr]] i', {1: 'i__iP', 2: '1i'}) check('member', 'int *const [[attr]] volatile i', {1: 'i__iPVC', 2: '1i'}, output='int *[[attr]] volatile const i') check('member', 'int &[[attr]] i', {1: 'i__iR', 2: '1i'}) check('member', 'int *[[attr]] *i', {1: 'i__iPP', 2: '1i'}) # position: parameters and qualifiers check('function', 'void f() [[attr1]] [[attr2]]', {1: 'f', 2: '1fv'}) # position: class check('class', '{key}[[nodiscard]] Foo', {1: 'Foo', 2: '3Foo', 3: '3Foo', 4: '3Foo'}, key='class') check('union', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo'}, key='union') # position: enum check('enum', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo', 3: '3Foo', 4: '3Foo'}, key='enum') " 20002,"def multi(img, coord, radius, spacing=None, nrows=None, ncols=None): """"""Create a labeled mask for color card chips Inputs img = Input image data. coord = Two-element tuple of the center of the top left object. radius = Radius of ROIs. spacing = Two-element tuple of the horizontal and vertical spacing between ROIs. nrows = Number of chip rows. ncols = Number of chip columns. 
Returns: mask = Labeled mask ROIs :param img: numpy.ndarray :param coord: tuple, list :param radius: int :param spacing: tuple :param nrows: int :param ncols: int :return mask: numpy.ndarray """""" # Autoincrement the device counter params.device += 1 # Initialize ROI list rois = [] # Store user debug debug = params.debug # Temporarily disable debug params.debug = None # Get the height and width of the reference image height, width = np.shape(img)[:2] # Initialize a binary image of the circle bin_img = np.zeros((height, width), dtype=np.uint8) roi_contour = [] roi_hierarchy = [] # Grid of ROIs if (type(coord) == tuple) and ((nrows and ncols) is not None): # Loop over each row for i in range(0, nrows): # The upper left corner is the y starting coordinate + the ROI offset * the vertical spacing y = coord[1] + i * spacing[1] # Loop over each column for j in range(0, ncols): # The upper left corner is the x starting coordinate + the ROI offset * the # horizontal spacing between chips x = coord[0] + j * spacing[0] # Create a chip ROI rois.append(circle(img=img, x=x, y=y, r=radius)) # Draw the circle on the binary image cv2.circle(bin_img, (x, y), radius, 255, -1) # Make a list of contours and hierarchies roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][0]) roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][1]) # Create an array of contours and list of hierarchy for when debug is set to 'plot' roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:] # User specified ROI centers elif (type(coord) == list) and ((nrows and ncols) is None): for i in range(0, len(coord)): y = coord[i][1] x = coord[i][0] rois.append(circle(img=img, x=x, y=y, r=radius)) # Draw the circle on the binary image cv2.circle(bin_img, (x, y), radius, 255, -1) # Make a list of contours and hierarchies roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][0]) roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][1]) # Create an array of contours and list of hierarchy for when debug is set to 'plot' roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:] else: fatal_error(""Function can either make a grid of ROIs (user must provide nrows, ncols, spacing, and coord) "" ""or take custom ROI coordinates (user must provide a list of tuples to 'coord' parameter)"") # Reset debug params.debug = debug # Draw the ROIs if requested if params.debug is not None: _draw_roi(img=img, roi_contour=roi_contour1) return roi_contour, roi_hierarchy ","def multi(img, coord, radius, spacing=None, nrows=None, ncols=None): """"""Create a labeled mask for color card chips Inputs img = Input image data. coord = Two-element tuple of the center of the top left object. radius = Radius of ROIs. spacing = Two-element tuple of the horizontal and vertical spacing between ROIs. nrows = Number of chip rows. ncols = Number of columns in ROI layout. 
Returns: mask = Labeled mask ROIs :param img: numpy.ndarray :param coord: tuple, list :param radius: int :param spacing: tuple :param nrows: int :param ncols: int :return mask: numpy.ndarray """""" # Autoincrement the device counter params.device += 1 # Initialize ROI list rois = [] # Store user debug debug = params.debug # Temporarily disable debug params.debug = None # Get the height and width of the reference image height, width = np.shape(img)[:2] # Initialize a binary image of the circle bin_img = np.zeros((height, width), dtype=np.uint8) roi_contour = [] roi_hierarchy = [] # Grid of ROIs if (type(coord) == tuple) and ((nrows and ncols) is not None): # Loop over each row for i in range(0, nrows): # The upper left corner is the y starting coordinate + the ROI offset * the vertical spacing y = coord[1] + i * spacing[1] # Loop over each column for j in range(0, ncols): # The upper left corner is the x starting coordinate + the ROI offset * the # horizontal spacing between chips x = coord[0] + j * spacing[0] # Create a chip ROI rois.append(circle(img=img, x=x, y=y, r=radius)) # Draw the circle on the binary image cv2.circle(bin_img, (x, y), radius, 255, -1) # Make a list of contours and hierarchies roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][0]) roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][1]) # Create an array of contours and list of hierarchy for when debug is set to 'plot' roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:] # User specified ROI centers elif (type(coord) == list) and ((nrows and ncols) is None): for i in range(0, len(coord)): y = coord[i][1] x = coord[i][0] rois.append(circle(img=img, x=x, y=y, r=radius)) # Draw the circle on the binary image cv2.circle(bin_img, (x, y), radius, 255, -1) # Make a list of contours and hierarchies roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][0]) roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:][1]) # Create an array of contours and list of hierarchy for when debug is set to 'plot' roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:] else: fatal_error(""Function can either make a grid of ROIs (user must provide nrows, ncols, spacing, and coord) "" ""or take custom ROI coordinates (user must provide a list of tuples to 'coord' parameter)"") # Reset debug params.debug = debug # Draw the ROIs if requested if params.debug is not None: _draw_roi(img=img, roi_contour=roi_contour1) return roi_contour, roi_hierarchy " 43955,"def _diff2(i, j, ri, rj, alpha, beta): r""""""Compute the second order differentiated integral needed for evaluating a kinetic integral. The second order integral :math:`D_{ij}^2`, where :math:`i` and :math:`j` denote angular momentum components of Gaussian functions, is computed from overlap integrals :math:`S` and the Gaussian exponent :math:`\beta` as [`Helgaker (1995) p804 `_]: .. math:: D_{ij}^2 = j(j-1)S_{i,j-2}^0 - 2\beta(2j+1)S_{i,j}^0 + 4\beta^2 S_{i,j+2}^0. 
Args: i (integer): angular momentum component for the first Gaussian function j (integer): angular momentum component for the second Gaussian function ri (float): position component of the the first Gaussian function ri (float): position component of the the second Gaussian function alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function Returns: array[float]: second order differentiated integral between two Gaussian functions """""" p = alpha + beta d1 = j * (j - 1) * anp.sqrt(anp.pi / p) * expansion(i, j - 2, ri, rj, alpha, beta, 0) d2 = -2 * beta * (2 * j + 1) * anp.sqrt(anp.pi / p) * expansion(i, j, ri, rj, alpha, beta, 0) d3 = 4 * beta ** 2 * anp.sqrt(anp.pi / p) * expansion(i, j + 2, ri, rj, alpha, beta, 0) return d1 + d2 + d3 ","def _diff2(i, j, ri, rj, alpha, beta): r""""""Compute the second order differentiated integral needed for evaluating a kinetic integral. The second order integral :math:`D_{ij}^2`, where :math:`i` and :math:`j` denote angular momentum components of Gaussian functions, is computed from overlap integrals :math:`S` and the Gaussian exponent :math:`\beta` as [`Helgaker (1995) p804 `_]: .. math:: D_{ij}^2 = j(j-1)S_{i,j-2}^0 - 2\beta(2j+1)S_{i,j}^0 + 4\beta^2 S_{i,j+2}^0. Args: i (integer): angular momentum component for the first Gaussian function j (integer): angular momentum component for the second Gaussian function ri (float): position component of the the first Gaussian function ri (float): position component of the the second Gaussian function alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function Returns: array[float]: second-order differentiated integral between two Gaussian functions """""" p = alpha + beta d1 = j * (j - 1) * anp.sqrt(anp.pi / p) * expansion(i, j - 2, ri, rj, alpha, beta, 0) d2 = -2 * beta * (2 * j + 1) * anp.sqrt(anp.pi / p) * expansion(i, j, ri, rj, alpha, beta, 0) d3 = 4 * beta ** 2 * anp.sqrt(anp.pi / p) * expansion(i, j + 2, ri, rj, alpha, beta, 0) return d1 + d2 + d3 " 24855,"def my_func(self): """"""This is a docstring. :return: Always False :rtype: bool """""" return False ","def my_func(self): """"""find_sphinx_returns :return: Always False :rtype: bool """""" return False " 49015,"def create_instance(objcls, settings, crawler, *args, **kwargs): """"""Construct a class instance using its ``from_crawler`` or ``from_settings`` constructors, if available. At least one of ``settings`` and ``crawler`` needs to be different from ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used. If ``crawler`` is ``None``, only the ``from_settings`` constructor will be tried. ``*args`` and ``**kwargs`` are forwarded to the constructors. Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``. Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an extension has not been implemented correctly). 
"""""" if settings is None: if crawler is None: raise ValueError(""Specify at least one of settings and crawler."") settings = crawler.settings instance = None if crawler and hasattr(objcls, 'from_crawler'): instance = objcls.from_crawler(crawler, *args, **kwargs) elif hasattr(objcls, 'from_settings'): instance = objcls.from_settings(settings, *args, **kwargs) else: instance = objcls(*args, **kwargs) if instance is None: raise TypeError(""Instance is None for %s"" % objcls) return instance ","def create_instance(objcls, settings, crawler, *args, **kwargs): """"""Construct a class instance using its ``from_crawler`` or ``from_settings`` constructors, if available. At least one of ``settings`` and ``crawler`` needs to be different from ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used. If ``crawler`` is ``None``, only the ``from_settings`` constructor will be tried. ``*args`` and ``**kwargs`` are forwarded to the constructors. Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``. Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an extension has not been implemented correctly). """""" if settings is None: if crawler is None: raise ValueError(""Specify at least one of settings and crawler."") settings = crawler.settings instance = None if crawler and hasattr(objcls, 'from_crawler'): instance = objcls.from_crawler(crawler, *args, **kwargs) elif hasattr(objcls, 'from_settings'): instance = objcls.from_settings(settings, *args, **kwargs) else: instance = objcls(*args, **kwargs) if not isinstance(instance, objcls): raise TypeError(""%r is not an instance of %r"" % (instance, objcls)) raise TypeError(""Instance is None for %s"" % objcls) return instance " 33435,"def get_package(session): """""" ""Perform the package operation"""""" config = session.config if config.skipsdist: info(""skipping sdist step"") return None lock_file = session.config.toxworkdir.join(""{}.lock"".format(session.config.isolated_build_env)) with hold_lock(lock_file, verbosity0): package = acquire_package(config, session) session_package = create_session_view(package, config.temp_dir) return session_package, package ","def get_package(session): """"""Perform the package operation"""""" config = session.config if config.skipsdist: info(""skipping sdist step"") return None lock_file = session.config.toxworkdir.join(""{}.lock"".format(session.config.isolated_build_env)) with hold_lock(lock_file, verbosity0): package = acquire_package(config, session) session_package = create_session_view(package, config.temp_dir) return session_package, package " 31211,"def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" token = demisto.params().get('token') # get the service API url base_url = urljoin(demisto.params()['url'], '/api/rest') verify_certificate = not demisto.params().get('insecure', False) # How much time before the first fetch to retrieve incidents first_fetch_time = demisto.params().get('fetch_time', '3 days').strip() proxy = demisto.params().get('proxy', False) headers = { ""Authorization"": token } LOG(f'Command being called is {demisto.command()}') try: client = Client( base_url=base_url, verify=verify_certificate, proxy=proxy, headers=headers ) args = demisto.args() if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. 
result = test_module(client) demisto.results(result) elif demisto.command() == 'fetch-incidents': # Set and define the fetch incidents command to run after activated via integration settings. next_run, incidents = fetch_incidents( client=client, last_run=demisto.getLastRun(), first_fetch_time=first_fetch_time) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'mantis-get-issue-by-id': mantis_get_issue_by_id_command(client, args) elif demisto.command() == 'mantis-get-issues': mantis_get_all_issues_command(client, args) elif demisto.command() == 'mantis-create-issue': mantis_create_issue_command(client, args) elif demisto.command() == 'mantis-add-note': matis_create_note_command(client, args) elif demisto.command() == 'mantis-close-issue': mantis_close_issue_command(client, args) # Log exceptions except Exception as e: return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}') ","def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" token = demisto.params().get('token') # get the service API url base_url = urljoin(demisto.params()['url'], '/api/rest') verify_certificate = not demisto.params().get('insecure', False) # How much time before the first fetch to retrieve incidents first_fetch_time = demisto.params().get('fetch_time', '3 days').strip() proxy = demisto.params().get('proxy', False) headers = { ""Authorization"": token } LOG(f'Command being called is {demisto.command()}') try: client = Client( base_url=base_url, verify=verify_certificate, proxy=proxy, headers=headers ) args = demisto.args() if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. result = test_module(client) demisto.results(result) elif demisto.command() == 'fetch-incidents': # Set and define the fetch incidents command to run after activated via integration settings. next_run, incidents = fetch_incidents( client=client, last_run=demisto.getLastRun(), first_fetch_time=first_fetch_time) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'mantis-get-issue-by-id': mantis_get_issue_by_id_command(client, args) elif demisto.command() == 'mantis-get-issues': mantis_get_all_issues_command(client, args) elif demisto.command() == 'mantis-create-issue': mantis_create_issue_command(client, args) elif demisto.command() == 'mantis-add-note': matis_create_note_command(client, args) elif demisto.command() == 'mantis-close-issue': return_results(mantis_close_issue_command(client, args)) # Log exceptions except Exception as e: return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}') " 31340,"def get_event_command(client: Client, args: dict): """""" Get a specific event by ID. Args: client (Client): Qualys FIM API client. args (dict): All command arguments. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. 
"""""" raw_response = client.get_event(str(args.get('event_id'))) table_headers = ['name', 'action', 'id', 'severity', 'action', 'incidentId', 'profiles', 'type', 'dateTime', 'fullPath'] object_data = create_event_or_incident_output(raw_response, table_headers) object_data['dateTime'] = datetime.strptime(str(object_data.get('dateTime')), DATETIME_FORMAT).strftime(TABLE_DATETIME_FORMAT) readable_output = tableToMarkdown(name='Found Event:', t=object_data, headers=table_headers, removeNull=True) return CommandResults(outputs_prefix='QualysFIM.Event', outputs_key_field='id', raw_response=raw_response, outputs=raw_response, readable_output=readable_output) ","def get_event_command(client: Client, args: dict): """""" Get a specific event by ID. Args: client (Client): Qualys FIM API client. args (dict): All command arguments. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """""" raw_response = client.get_event(str(args['event_id'])) table_headers = ['name', 'action', 'id', 'severity', 'action', 'incidentId', 'profiles', 'type', 'dateTime', 'fullPath'] object_data = create_event_or_incident_output(raw_response, table_headers) object_data['dateTime'] = datetime.strptime(str(object_data.get('dateTime')), DATETIME_FORMAT).strftime(TABLE_DATETIME_FORMAT) readable_output = tableToMarkdown(name='Found Event:', t=object_data, headers=table_headers, removeNull=True) return CommandResults(outputs_prefix='QualysFIM.Event', outputs_key_field='id', raw_response=raw_response, outputs=raw_response, readable_output=readable_output) " 42464,"def get_features_used(node: Node) -> Set[Feature]: # noqa: C901 """"""Return a set of (relatively) new Python features used in this file. Currently looking for: - f-strings; - underscores in numeric literals; - trailing commas after * or ** in function signatures and calls; - positional only arguments in function signatures and lambdas; - assignment expression; - relaxed decorator syntax; - print / exec statements; """""" features: Set[Feature] = set() for n in node.pre_order(): if n.type == token.STRING: value_head = n.value[:2] # type: ignore if value_head in {'f""', 'F""', ""f'"", ""F'"", ""rf"", ""fr"", ""RF"", ""FR""}: features.add(Feature.F_STRINGS) elif n.type == token.NUMBER: assert isinstance(n, Leaf) if ""_"" in n.value: features.add(Feature.NUMERIC_UNDERSCORES) elif n.value.endswith(""L""): # Python 2: 10L features.add(Feature.LONG_INT_LITERAL) elif n.type == token.SLASH: if n.parent and n.parent.type in { syms.typedargslist, syms.arglist, syms.varargslist, }: features.add(Feature.POS_ONLY_ARGUMENTS) elif n.type == token.COLONEQUAL: features.add(Feature.ASSIGNMENT_EXPRESSIONS) elif n.type == syms.decorator: if len(n.children) > 1 and not is_simple_decorator_expression( n.children[1] ): features.add(Feature.RELAXED_DECORATORS) elif ( n.type in {syms.typedargslist, syms.arglist} and n.children and n.children[-1].type == token.COMMA ): if n.type == syms.typedargslist: feature = Feature.TRAILING_COMMA_IN_DEF else: feature = Feature.TRAILING_COMMA_IN_CALL for ch in n.children: if ch.type in STARS: features.add(feature) if ch.type == syms.argument: for argch in ch.children: if argch.type in STARS: features.add(feature) # Python 2 only features (for its deprecation) except for 10L, see above elif n.type == syms.print_stmt: features.add(Feature.PRINT_STMT) elif n.type == syms.exec_stmt: features.add(Feature.EXEC_STMT) elif n.type == syms.tfpdef: # def set_position((x, y), value): # ... 
features.add(Feature.AUTOMATIC_PARAMETER_UNPACKING) elif n.type == syms.except_clause: # try: # ... # except Exception, err: # ... if len(n.children) >= 4: if n.children[-2].type == token.COMMA: features.add(Feature.COMMA_STYLE_EXCEPT) elif n.type == syms.raise_stmt: # raise Exception, ""msg"" if len(n.children) >= 4: if n.children[-2].type == token.COMMA: features.add(Feature.COMMA_STYLE_RAISE) elif n.type == token.BACKQUOTE: # `i'm surprised this ever existed` features.add(Feature.BACKQUOTE_REPR) return features ","def get_features_used(node: Node) -> Set[Feature]: # noqa: C901 """"""Return a set of (relatively) new Python features used in this file. Currently looking for: - f-strings; - underscores in numeric literals; - trailing commas after * or ** in function signatures and calls; - positional only arguments in function signatures and lambdas; - assignment expression; - relaxed decorator syntax; - print / exec statements; """""" features: Set[Feature] = set() for n in node.pre_order(): if n.type == token.STRING: value_head = n.value[:2] # type: ignore if value_head in {'f""', 'F""', ""f'"", ""F'"", ""rf"", ""fr"", ""RF"", ""FR""}: features.add(Feature.F_STRINGS) elif n.type == token.NUMBER: assert isinstance(n, Leaf) if ""_"" in n.value: features.add(Feature.NUMERIC_UNDERSCORES) elif n.value.endswith((""l"", ""L"")): # Python 2: 10L features.add(Feature.LONG_INT_LITERAL) elif n.type == token.SLASH: if n.parent and n.parent.type in { syms.typedargslist, syms.arglist, syms.varargslist, }: features.add(Feature.POS_ONLY_ARGUMENTS) elif n.type == token.COLONEQUAL: features.add(Feature.ASSIGNMENT_EXPRESSIONS) elif n.type == syms.decorator: if len(n.children) > 1 and not is_simple_decorator_expression( n.children[1] ): features.add(Feature.RELAXED_DECORATORS) elif ( n.type in {syms.typedargslist, syms.arglist} and n.children and n.children[-1].type == token.COMMA ): if n.type == syms.typedargslist: feature = Feature.TRAILING_COMMA_IN_DEF else: feature = Feature.TRAILING_COMMA_IN_CALL for ch in n.children: if ch.type in STARS: features.add(feature) if ch.type == syms.argument: for argch in ch.children: if argch.type in STARS: features.add(feature) # Python 2 only features (for its deprecation) except for 10L, see above elif n.type == syms.print_stmt: features.add(Feature.PRINT_STMT) elif n.type == syms.exec_stmt: features.add(Feature.EXEC_STMT) elif n.type == syms.tfpdef: # def set_position((x, y), value): # ... features.add(Feature.AUTOMATIC_PARAMETER_UNPACKING) elif n.type == syms.except_clause: # try: # ... # except Exception, err: # ... 
if len(n.children) >= 4: if n.children[-2].type == token.COMMA: features.add(Feature.COMMA_STYLE_EXCEPT) elif n.type == syms.raise_stmt: # raise Exception, ""msg"" if len(n.children) >= 4: if n.children[-2].type == token.COMMA: features.add(Feature.COMMA_STYLE_RAISE) elif n.type == token.BACKQUOTE: # `i'm surprised this ever existed` features.add(Feature.BACKQUOTE_REPR) return features " 46352,"def enable(name=None, verbose=True, deprecation=True): if LooseVersion(sklearn_version) < LooseVersion(""0.21.0""): raise NotImplementedError( ""daal4py patches apply for scikit-learn >= 0.21.0 only ..."") if name is not None: do_patch(name) else: for key in _get_map_of_algorithms(): do_patch(key) if deprecation: set_idp_sklearn_verbose() warn(""Scikit-learn patching with daal4py is deprecated "" ""and will be removed in the future.\n"" ""Please, use Intel(R) Extension for Scikit-learn module instead "" ""(pip install scikit-learn-intelex)\n"" ""To enable patching, please, use one of options:\n"" ""1) python -m sklearnex \n"" ""2) from sklearnex import patch_sklearn\n"" "" patch_sklearn()"", FutureWarning, stacklevel=2) if verbose and sys.stderr is not None: sys.stderr.write( ""Intel(R) oneAPI Data Analytics Library solvers for sklearn enabled: "" ""https://intelpython.github.io/daal4py/sklearn.html\n"") logging.warning('Please, do re-import of imported scikit-learn modules ' 'after patch_sklearn()') ","def enable(name=None, verbose=True, deprecation=True): if LooseVersion(sklearn_version) < LooseVersion(""0.21.0""): raise NotImplementedError( ""daal4py patches apply for scikit-learn >= 0.21.0 only ..."") if name is not None: do_patch(name) else: for key in _get_map_of_algorithms(): do_patch(key) if deprecation: set_idp_sklearn_verbose() warn(""Scikit-learn patching with daal4py is deprecated "" ""and will be removed in the future.\n"" ""Please, use Intel(R) Extension for Scikit-learn module instead "" ""(pip install scikit-learn-intelex)\n"" ""To enable patching, please, use one of options:\n"" ""1) python -m sklearnex \n"" ""2) from sklearnex import patch_sklearn\n"" "" patch_sklearn()"", FutureWarning, stacklevel=2) if verbose and sys.stderr is not None: sys.stderr.write( ""Intel(R) oneAPI Data Analytics Library solvers for sklearn enabled: "" ""https://intelpython.github.io/daal4py/sklearn.html\n"") logging.warning('Reimport previously imported scikit-learn modules ' 'after patch_sklearn()') " 31438,"def bulk_resolve_alert_command(client: Client, args: dict): """""" Deprecated by: close_true_positive_command """""" alert_ids = args.get('alert_ids') custom_filter = args.get('custom_filter') comment = args.get('comment') request_data = args_to_filter_for_dismiss_and_resolve_alerts(alert_ids, custom_filter, comment) resolve_alerts = client.resolve_bulk_alerts(request_data) number_of_resolved_alerts = resolve_alerts['closed_true_positive'] return CommandResults( readable_output=f'{number_of_resolved_alerts} alerts resolved', outputs_prefix='MicrosoftCloudAppSecurity.Alerts', outputs_key_field='alert_id', outputs=resolve_alerts ) ","def bulk_resolve_alert_command(client: Client, args: dict): """""" Deprecated: use close_true_positive_command instead """""" alert_ids = args.get('alert_ids') custom_filter = args.get('custom_filter') comment = args.get('comment') request_data = args_to_filter_for_dismiss_and_resolve_alerts(alert_ids, custom_filter, comment) resolve_alerts = client.resolve_bulk_alerts(request_data) number_of_resolved_alerts = resolve_alerts['closed_true_positive'] return CommandResults( 
readable_output=f'{number_of_resolved_alerts} alerts resolved', outputs_prefix='MicrosoftCloudAppSecurity.Alerts', outputs_key_field='alert_id', outputs=resolve_alerts ) " 23819,"def _get_profile_compiler_version(compiler, version, output): tokens = version.split(""."") major = tokens[0] minor = tokens[1] if len(tokens) > 1 else 0 if compiler == ""clang"" and int(major) >= 8: output.info(""clang>=8, using the major as version"") return major elif compiler == ""gcc"" and int(major) >= 5: output.info(""gcc>=5, using the major as version"") return major elif compiler == ""Visual Studio"": return major elif compiler == ""intel"" and (int(major) < 19 or (int(major) == 19 and int(minor) == 0)): return major elif compiler == ""msvc"": # by default, drop the last digit of the minor (19.30 -> 19.3) if len(minor) == 2: version = version[:len(version) - 1] return version ","def _get_profile_compiler_version(compiler, version, output): tokens = version.split(""."") major = tokens[0] minor = tokens[1] if len(tokens) > 1 else 0 if compiler == ""clang"" and int(major) >= 8: output.info(""clang>=8, using the major as version"") return major elif compiler == ""gcc"" and int(major) >= 5: output.info(""gcc>=5, using the major as version"") return major elif compiler == ""Visual Studio"": return major elif compiler == ""intel"" and (int(major) < 19 or (int(major) == 19 and int(minor) == 0)): return major elif compiler == ""msvc"": # by default, drop the last digit of the minor (19.30 -> 19.3) if len(minor) == 2: version = version[:-1] return version " 31138,"def url_quota_command(): cmd_url = '/urlCategories/urlQuota' response = http_request('GET', cmd_url).json() human_readable = { 'Unique Provisioned URLs': response.get('uniqueUrlsProvisioned'), 'Remaining URLs Quota': response.get('remainingUrlsQuota') } entry = { 'Type': entryTypes['note'], 'Contents': response, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown(""Quota Information"", human_readable), 'EntryContext': {'Zscaler.Quota': response} } return entry ","def url_quota_command(): cmd_url = '/urlCategories/urlQuota' response = http_request('GET', cmd_url).json() human_readable = { 'Unique Provisioned URLs': response.get('uniqueUrlsProvisioned'), 'Remaining URLs Quota': response.get('remainingUrlsQuota') } entry = { 'Type': entryTypes['note'], 'Contents': response, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown(""Quota Information"", human_readable), 'EntryContext': {'Zscaler.Quota': response}, } return entry " 30798,"def link_forensics_artifacts_name_command(file_names, client: Client, args: dict) -> CommandResults: event_id = args.get(""event_id"", 0) if len(file_names) > 0: outputs = { 'eventId': int(event_id), 'Artifacts': file_names } return CommandResults( outputs_prefix='Illusive.Event', outputs_key_field='eventId', outputs=outputs ) else: readable_output = f'### event id {event_id} has no artifacts' return CommandResults( readable_output=readable_output, outputs_prefix='Illusive.Event', outputs_key_field='eventId', outputs={'eventId': int(event_id)} ) ","def link_forensics_artifacts_name_command(file_names, client: Client, args: dict) -> CommandResults: event_id = args.get(""event_id"", 0) if len(file_names) > 0: outputs = { 'eventId': int(event_id), 'Artifacts': file_names } return CommandResults( outputs_prefix='Illusive.Event', outputs_key_field='eventId', outputs=outputs ) else: readable_output = f'### Event 
ID {event_id} has no artifacts' return CommandResults( readable_output=readable_output, outputs_prefix='Illusive.Event', outputs_key_field='eventId', outputs={'eventId': int(event_id)} ) " 13081,"def create_default_product_type(apps, schema_editor): ProductType = apps.get_model(""product"", ""ProductType"") ProductType.objects.create( name=""Default Type"", slug=""default-type"", kind=ProductTypeKind.NORMAL, has_variants=False, is_shipping_required=True, ) ","def create_default_product_type(apps, schema_editor): ProductType = apps.get_model(""product"", ""ProductType"") ProductType.objects.create( name=""Default Product Type"", slug=""default-type"", kind=ProductTypeKind.NORMAL, has_variants=False, is_shipping_required=True, ) " 58393,"def _py_filename(path): if not path: return None if path[-4:] in ('.pyc', '.pyo'): path = path.rstrip('co') if path.endswith('.py'): return path if os.path.exists(path) and _looks_like_script(path): return path basepath = os.path.basename(path) for filename in SPECIAL_FILE_PATHS: if basepath == filename: return path ","def _py_filename(path): if not path: return None if path[-4:] in ('.pyc', '.pyo'): path = path.rstrip('co') if path.endswith('.py'): return path if os.path.exists(path) and _looks_like_script(path): return path basepath = os.path.basename(path) if basepath in SPECIAL_FILE_PATHS: return path " 20738,"def test_exportJobError(upload_manager): mocked_application = MagicMock() with patch(""UM.Application.Application.getInstance"", MagicMock(return_value=mocked_application)): upload_manager._onJobExportError(""file_name.3mf"") # Ensure that message was displayed mocked_application.showMessageSignal.emit.assert_called_once()","def test_exportJobError(upload_manager): mocked_application = MagicMock() with patch(""UM.Application.Application.getInstance"", MagicMock(return_value = mocked_application)): upload_manager._onJobExportError(""file_name.3mf"") # Ensure that message was displayed mocked_application.showMessageSignal.emit.assert_called_once()" 36216,"def _new_get_redirect(request, path): if hasattr(request, 'LANGUAGE_CODE'): # If this path has an i18n_patterns locale prefix, remove it. locale_prefix = f'/{request.LANGUAGE_CODE}/' if path.startswith(locale_prefix): path = path.replace(locale_prefix, '/', 1) # Then hand off processing to the original redirect logic. redirect = _original_get_redirect(request, path) # Wagtail currently does not forward query arguments, so for # any URL with stripped query arguments we make a new, on-the-fly # redirect with the same path/site bindings, but an updated link # with the query arguments restored. if redirect and request.GET: target = redirect.link if request.GET and ""?"" not in target: redirect = Redirect( old_path=redirect.old_path, site=redirect.site, redirect_link=f'{redirect.link}?{urlencode(request.GET)}' ) return redirect ","def _new_get_redirect(request, path): if hasattr(request, 'LANGUAGE_CODE'): # If this path has an i18n_patterns locale prefix, remove it. locale_prefix = f'/{request.LANGUAGE_CODE}/' if path.startswith(locale_prefix): path = path.replace(locale_prefix, '/', 1) # Then hand off processing to the original redirect logic. redirect = _original_get_redirect(request, path) # Wagtail currently does not forward query arguments, so for # any URL with stripped query arguments we make a new, on-the-fly # redirect with the same path/site bindings, but an updated link # with the query arguments restored. 
if redirect and request.GET: target = redirect.link if ""?"" not in target: redirect = Redirect( old_path=redirect.old_path, site=redirect.site, redirect_link=f'{redirect.link}?{urlencode(request.GET)}' ) return redirect " 42804,"def reindex_func(frame, indexer=None): """""" Function to reshape dataframe in pivot_longer, to try and make it look similar to the source data in terms of direction of the columns. It is a temporary measure until the minimum pandas version is 1.1, where we can take advantage of the `ignore_index` argument in `pd.melt`. Example: if columns are `id, ht1, ht2, ht3`, then depending on the arguments passed, the column in the reshaped dataframe, based on this function, will look like `1,2,3,1,2,3,1,2,3...`. This way, we ensure that for every index, there is a complete set of the data. A reindexed dataframe is returned. """""" if indexer is None: uniq_index_length = len(frame.drop_duplicates()) else: uniq_index_length = len(frame.loc[:, indexer].drop_duplicates()) sorter = np.reshape(frame.index, (-1, uniq_index_length)) # reshaped in Fortan order achieves the alternation sorter = np.ravel(sorter, order=""F"") return frame.reindex(sorter) ","def reindex_func(frame: pd.DataFrame, indexer=None) -> pd.DataFrame: """""" Function to reshape dataframe in pivot_longer, to try and make it look similar to the source data in terms of direction of the columns. It is a temporary measure until the minimum pandas version is 1.1, where we can take advantage of the `ignore_index` argument in `pd.melt`. Example: if columns are `id, ht1, ht2, ht3`, then depending on the arguments passed, the column in the reshaped dataframe, based on this function, will look like `1,2,3,1,2,3,1,2,3...`. This way, we ensure that for every index, there is a complete set of the data. A reindexed dataframe is returned. """""" if indexer is None: uniq_index_length = len(frame.drop_duplicates()) else: uniq_index_length = len(frame.loc[:, indexer].drop_duplicates()) sorter = np.reshape(frame.index, (-1, uniq_index_length)) # reshaped in Fortan order achieves the alternation sorter = np.ravel(sorter, order=""F"") return frame.reindex(sorter) " 20519,"def register_wrapper(fname_src, fname_dest, param, paramregmulti, fname_src_seg='', fname_dest_seg='', fname_src_label='', fname_dest_label='', fname_mask='', fname_initwarp='', fname_initwarpinv='', identity=False, interp='linear', fname_output='', fname_output_warp='', path_out='', same_space=False): """""" Wrapper for image registration. :param fname_src: :param fname_dest: :param param: Class Param(): See definition in sct_register_multimodal :param paramregmulti: Class ParamregMultiStep(): See definition in this file :param fname_src_seg: :param fname_dest_seg: :param fname_src_label: :param fname_dest_label: :param fname_mask: :param fname_initwarp: str: File name of initial transformation :param fname_initwarpinv: str: File name of initial inverse transformation :param identity: :param interp: :param fname_output: :param fname_output_warp: :param path_out: :param same_space: Bool: Source and destination images are in the same physical space (i.e. same coordinates). :return: fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv """""" # TODO: move interp inside param. 
# TODO: merge param inside paramregmulti by having a ""global"" sets of parameters that apply to all steps # Extract path, file and extension path_src, file_src, ext_src = sct.extract_fname(fname_src) path_dest, file_dest, ext_dest = sct.extract_fname(fname_dest) # check if source and destination images have the same name (related to issue #373) # If so, change names to avoid conflict of result files and warns the user suffix_src, suffix_dest = '_reg', '_reg' if file_src == file_dest: suffix_src, suffix_dest = '_src_reg', '_dest_reg' # define output folder and file name if fname_output == '': path_out = '' if not path_out else path_out # output in user's current directory file_out = file_src + suffix_src file_out_inv = file_dest + suffix_dest ext_out = ext_src else: path, file_out, ext_out = sct.extract_fname(fname_output) path_out = path if not path_out else path_out file_out_inv = file_out + '_inv' # create temporary folder path_tmp = sct.tmp_create(basename=""register"") sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose) Image(fname_src).save(os.path.join(path_tmp, ""src.nii"")) Image(fname_dest).save(os.path.join(path_tmp, ""dest.nii"")) if fname_src_seg: Image(fname_src_seg).save(os.path.join(path_tmp, ""src_seg.nii"")) if fname_dest_seg: Image(fname_dest_seg).save(os.path.join(path_tmp, ""dest_seg.nii"")) if fname_src_label: Image(fname_src_label).save(os.path.join(path_tmp, ""src_label.nii"")) Image(fname_dest_label).save(os.path.join(path_tmp, ""dest_label.nii"")) if fname_mask != '': Image(fname_mask).save(os.path.join(path_tmp, ""mask.nii.gz"")) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # reorient destination to RPI Image('dest.nii').change_orientation(""RPI"").save('dest_RPI.nii') if fname_dest_seg: Image('dest_seg.nii').change_orientation(""RPI"").save('dest_seg_RPI.nii') if fname_dest_label: Image('dest_label.nii').change_orientation(""RPI"").save('dest_label_RPI.nii') if fname_mask: # TODO: change output name Image('mask.nii.gz').change_orientation(""RPI"").save('mask.nii.gz') if identity: # overwrite paramregmulti and only do one identity transformation step0 = Paramreg(step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5') paramregmulti = ParamregMultiStep([step0]) # initialize list of warping fields warp_forward = [] warp_forward_winv = [] warp_inverse = [] warp_inverse_winv = [] generate_warpinv = 1 # initial warping is specified, update list of warping fields and skip step=0 if fname_initwarp: sct.printv('\nSkip step=0 and replace with initial transformations: ', param.verbose) sct.printv(' ' + fname_initwarp, param.verbose) # sct.copy(fname_initwarp, 'warp_forward_0.nii.gz') warp_forward.append(fname_initwarp) start_step = 1 if fname_initwarpinv: warp_inverse.append(fname_initwarpinv) else: sct.printv('\nWARNING: No initial inverse warping field was specified, therefore the inverse warping field ' 'will NOT be generated.', param.verbose, 'warning') generate_warpinv = 0 else: if same_space: start_step = 1 else: start_step = 0 # loop across registration steps for i_step in range(start_step, len(paramregmulti.steps)): sct.printv('\n--\nESTIMATE TRANSFORMATION FOR STEP #' + str(i_step), param.verbose) # identify which is the src and dest if paramregmulti.steps[str(i_step)].type == 'im': src = ['src.nii'] dest = ['dest_RPI.nii'] interp_step = ['spline'] elif paramregmulti.steps[str(i_step)].type == 'seg': src = ['src_seg.nii'] dest = ['dest_seg_RPI.nii'] interp_step = ['nn'] elif 
paramregmulti.steps[str(i_step)].type == 'imseg': src = ['src.nii', 'src_seg.nii'] dest = ['dest_RPI.nii', 'dest_seg_RPI.nii'] interp_step = ['spline', 'nn'] elif paramregmulti.steps[str(i_step)].type == 'label': src = ['src_label.nii'] dest = ['dest_label_RPI.nii'] interp_step = ['nn'] else: sct.printv('ERROR: Wrong image type: {}'.format(paramregmulti.steps[str(i_step)].type), 1, 'error') # if step>0, apply warp_forward_concat to the src image to be used if (not same_space and i_step > 0) or (same_space and i_step > 1): sct.printv('\nApply transformation from previous step', param.verbose) for ifile in range(len(src)): sct_apply_transfo.main(args=[ '-i', src[ifile], '-d', dest[ifile], '-w', warp_forward, '-o', sct.add_suffix(src[ifile], '_reg'), '-x', interp_step[ifile]]) src[ifile] = sct.add_suffix(src[ifile], '_reg') # register src --> dest warp_forward_out, warp_inverse_out = register(src, dest, paramregmulti, param, str(i_step)) # deal with transformations with ""-"" as prefix. They should be inverted with calling sct_concat_transfo. if warp_forward_out[0] == ""-"": warp_forward_out = warp_forward_out[1:] warp_forward_winv.append(warp_forward_out) if warp_inverse_out[0] == ""-"": warp_inverse_out = warp_inverse_out[1:] warp_inverse_winv.append(warp_inverse_out) # update list of forward/inverse transformations warp_forward.append(warp_forward_out) warp_inverse.insert(0, warp_inverse_out) # Concatenate transformations sct.printv('\nConcatenate transformations...', param.verbose) sct_concat_transfo.main(args=[ '-w', warp_forward, '-winv', warp_forward_winv, '-d', 'dest.nii', '-o', 'warp_src2dest.nii.gz']) sct_concat_transfo.main(args=[ '-w', warp_inverse, '-winv', warp_inverse_winv, '-d', 'src.nii', '-o', 'warp_dest2src.nii.gz']) # TODO: make the following code optional (or move it to sct_register_multimodal) # Apply warping field to src data sct.printv('\nApply transfo source --> dest...', param.verbose) sct_apply_transfo.main(args=[ '-i', 'src.nii', '-d', 'dest.nii', '-w', 'warp_src2dest.nii.gz', '-o', 'src_reg.nii', '-x', interp]) sct.printv('\nApply transfo dest --> source...', param.verbose) sct_apply_transfo.main(args=[ '-i', 'dest.nii', '-d', 'src.nii', '-w', 'warp_dest2src.nii.gz', '-o', 'dest_reg.nii', '-x', interp]) # come back os.chdir(curdir) # Generate output files # ------------------------------------------------------------------------------------------------------------------ sct.printv('\nGenerate output files...', param.verbose) # generate: src_reg fname_src2dest = sct.generate_output_file( os.path.join(path_tmp, ""src_reg.nii""), os.path.join(path_out, file_out + ext_out), param.verbose) # generate: dest_reg fname_dest2src = sct.generate_output_file( os.path.join(path_tmp, ""dest_reg.nii""), os.path.join(path_out, file_out_inv + ext_dest), param.verbose) # generate: forward warping field if fname_output_warp == '': fname_output_warp = os.path.join(path_out, 'warp_' + file_src + '2' + file_dest + '.nii.gz') sct.generate_output_file(os.path.join(path_tmp, ""warp_src2dest.nii.gz""), fname_output_warp, param.verbose) # generate: inverse warping field if generate_warpinv: fname_output_warpinv = os.path.join(path_out, 'warp_' + file_dest + '2' + file_src + '.nii.gz') sct.generate_output_file(os.path.join(path_tmp, ""warp_dest2src.nii.gz""), fname_output_warpinv, param.verbose) else: fname_output_warpinv = None # Delete temporary files if param.remove_temp_files: sct.printv('\nRemove temporary files...', param.verbose) sct.rmtree(path_tmp, verbose=param.verbose) return 
fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv ","def register_wrapper(fname_src, fname_dest, param, paramregmulti, fname_src_seg='', fname_dest_seg='', fname_src_label='', fname_dest_label='', fname_mask='', fname_initwarp='', fname_initwarpinv='', identity=False, interp='linear', fname_output='', fname_output_warp='', path_out='', same_space=False): """""" Wrapper for image registration. :param fname_src: :param fname_dest: :param param: Class Param(): See definition in sct_register_multimodal :param paramregmulti: Class ParamregMultiStep(): See definition in this file :param fname_src_seg: :param fname_dest_seg: :param fname_src_label: :param fname_dest_label: :param fname_mask: :param fname_initwarp: str: File name of initial transformation :param fname_initwarpinv: str: File name of initial inverse transformation :param identity: :param interp: :param fname_output: :param fname_output_warp: :param path_out: :param same_space: Bool: Source and destination images are in the same physical space (i.e. same coordinates). :return: fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv """""" # TODO: move interp inside param. # TODO: merge param inside paramregmulti by having a ""global"" sets of parameters that apply to all steps # Extract path, file and extension path_src, file_src, ext_src = sct.extract_fname(fname_src) path_dest, file_dest, ext_dest = sct.extract_fname(fname_dest) # check if source and destination images have the same name (related to issue #373) # If so, change names to avoid conflict of result files and warns the user suffix_src, suffix_dest = '_reg', '_reg' if file_src == file_dest: suffix_src, suffix_dest = '_src_reg', '_dest_reg' # define output folder and file name if fname_output == '': path_out = '' if not path_out else path_out # output in user's current directory file_out = file_src + suffix_src file_out_inv = file_dest + suffix_dest ext_out = ext_src else: path, file_out, ext_out = sct.extract_fname(fname_output) path_out = path if not path_out else path_out file_out_inv = file_out + '_inv' # create temporary folder path_tmp = sct.tmp_create(basename=""register"") sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose) Image(fname_src).save(os.path.join(path_tmp, ""src.nii"")) Image(fname_dest).save(os.path.join(path_tmp, ""dest.nii"")) if fname_src_seg: Image(fname_src_seg).save(os.path.join(path_tmp, ""src_seg.nii"")) if fname_dest_seg: Image(fname_dest_seg).save(os.path.join(path_tmp, ""dest_seg.nii"")) if fname_src_label: Image(fname_src_label).save(os.path.join(path_tmp, ""src_label.nii"")) Image(fname_dest_label).save(os.path.join(path_tmp, ""dest_label.nii"")) if fname_mask != '': Image(fname_mask).save(os.path.join(path_tmp, ""mask.nii.gz"")) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # reorient destination to RPI Image('dest.nii').change_orientation(""RPI"").save('dest_RPI.nii') if fname_dest_seg: Image('dest_seg.nii').change_orientation(""RPI"").save('dest_seg_RPI.nii') if fname_dest_label: Image('dest_label.nii').change_orientation(""RPI"").save('dest_label_RPI.nii') if fname_mask: Image('mask.nii.gz').change_orientation(""RPI"").save('mask_RPI.nii.gz') if identity: # overwrite paramregmulti and only do one identity transformation step0 = Paramreg(step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5') paramregmulti = ParamregMultiStep([step0]) # initialize list of warping fields warp_forward = [] warp_forward_winv = [] 
warp_inverse = [] warp_inverse_winv = [] generate_warpinv = 1 # initial warping is specified, update list of warping fields and skip step=0 if fname_initwarp: sct.printv('\nSkip step=0 and replace with initial transformations: ', param.verbose) sct.printv(' ' + fname_initwarp, param.verbose) # sct.copy(fname_initwarp, 'warp_forward_0.nii.gz') warp_forward.append(fname_initwarp) start_step = 1 if fname_initwarpinv: warp_inverse.append(fname_initwarpinv) else: sct.printv('\nWARNING: No initial inverse warping field was specified, therefore the inverse warping field ' 'will NOT be generated.', param.verbose, 'warning') generate_warpinv = 0 else: if same_space: start_step = 1 else: start_step = 0 # loop across registration steps for i_step in range(start_step, len(paramregmulti.steps)): sct.printv('\n--\nESTIMATE TRANSFORMATION FOR STEP #' + str(i_step), param.verbose) # identify which is the src and dest if paramregmulti.steps[str(i_step)].type == 'im': src = ['src.nii'] dest = ['dest_RPI.nii'] interp_step = ['spline'] elif paramregmulti.steps[str(i_step)].type == 'seg': src = ['src_seg.nii'] dest = ['dest_seg_RPI.nii'] interp_step = ['nn'] elif paramregmulti.steps[str(i_step)].type == 'imseg': src = ['src.nii', 'src_seg.nii'] dest = ['dest_RPI.nii', 'dest_seg_RPI.nii'] interp_step = ['spline', 'nn'] elif paramregmulti.steps[str(i_step)].type == 'label': src = ['src_label.nii'] dest = ['dest_label_RPI.nii'] interp_step = ['nn'] else: sct.printv('ERROR: Wrong image type: {}'.format(paramregmulti.steps[str(i_step)].type), 1, 'error') # if step>0, apply warp_forward_concat to the src image to be used if (not same_space and i_step > 0) or (same_space and i_step > 1): sct.printv('\nApply transformation from previous step', param.verbose) for ifile in range(len(src)): sct_apply_transfo.main(args=[ '-i', src[ifile], '-d', dest[ifile], '-w', warp_forward, '-o', sct.add_suffix(src[ifile], '_reg'), '-x', interp_step[ifile]]) src[ifile] = sct.add_suffix(src[ifile], '_reg') # register src --> dest warp_forward_out, warp_inverse_out = register(src, dest, paramregmulti, param, str(i_step)) # deal with transformations with ""-"" as prefix. They should be inverted with calling sct_concat_transfo. 
if warp_forward_out[0] == ""-"": warp_forward_out = warp_forward_out[1:] warp_forward_winv.append(warp_forward_out) if warp_inverse_out[0] == ""-"": warp_inverse_out = warp_inverse_out[1:] warp_inverse_winv.append(warp_inverse_out) # update list of forward/inverse transformations warp_forward.append(warp_forward_out) warp_inverse.insert(0, warp_inverse_out) # Concatenate transformations sct.printv('\nConcatenate transformations...', param.verbose) sct_concat_transfo.main(args=[ '-w', warp_forward, '-winv', warp_forward_winv, '-d', 'dest.nii', '-o', 'warp_src2dest.nii.gz']) sct_concat_transfo.main(args=[ '-w', warp_inverse, '-winv', warp_inverse_winv, '-d', 'src.nii', '-o', 'warp_dest2src.nii.gz']) # TODO: make the following code optional (or move it to sct_register_multimodal) # Apply warping field to src data sct.printv('\nApply transfo source --> dest...', param.verbose) sct_apply_transfo.main(args=[ '-i', 'src.nii', '-d', 'dest.nii', '-w', 'warp_src2dest.nii.gz', '-o', 'src_reg.nii', '-x', interp]) sct.printv('\nApply transfo dest --> source...', param.verbose) sct_apply_transfo.main(args=[ '-i', 'dest.nii', '-d', 'src.nii', '-w', 'warp_dest2src.nii.gz', '-o', 'dest_reg.nii', '-x', interp]) # come back os.chdir(curdir) # Generate output files # ------------------------------------------------------------------------------------------------------------------ sct.printv('\nGenerate output files...', param.verbose) # generate: src_reg fname_src2dest = sct.generate_output_file( os.path.join(path_tmp, ""src_reg.nii""), os.path.join(path_out, file_out + ext_out), param.verbose) # generate: dest_reg fname_dest2src = sct.generate_output_file( os.path.join(path_tmp, ""dest_reg.nii""), os.path.join(path_out, file_out_inv + ext_dest), param.verbose) # generate: forward warping field if fname_output_warp == '': fname_output_warp = os.path.join(path_out, 'warp_' + file_src + '2' + file_dest + '.nii.gz') sct.generate_output_file(os.path.join(path_tmp, ""warp_src2dest.nii.gz""), fname_output_warp, param.verbose) # generate: inverse warping field if generate_warpinv: fname_output_warpinv = os.path.join(path_out, 'warp_' + file_dest + '2' + file_src + '.nii.gz') sct.generate_output_file(os.path.join(path_tmp, ""warp_dest2src.nii.gz""), fname_output_warpinv, param.verbose) else: fname_output_warpinv = None # Delete temporary files if param.remove_temp_files: sct.printv('\nRemove temporary files...', param.verbose) sct.rmtree(path_tmp, verbose=param.verbose) return fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv " 12724,"def docker_build_failed(address: Address, context: DockerBuildContext, colors: bool) -> str | None: if not context.copy_source_vs_context_source: return None msg = ( f""Docker build failed for `docker_image` {address}. The {context.dockerfile} have `COPY`"" ""instructions where the source files may not have been found in the Docker build context."" ""\n\n"" ) renames = [ format_rename_suggestion(src, dst, colors=colors) for src, dst in context.copy_source_vs_context_source if src and dst ] if renames: msg += ( f""However there are possible matches. 
Please review the following list of suggested "" f""renames:\n\n{bullet_list(renames)}\n\n"" ) unknown = [src for src, dst in context.copy_source_vs_context_source if not dst] if unknown: msg += ( f""The following files where not found in the Docker build context:\n\n"" f""{bullet_list(unknown)}\n\n"" ) unreferenced = [dst for src, dst in context.copy_source_vs_context_source if not src] if unreferenced: if len(unreferenced) > 10: unreferenced = unreferenced[:9] + [f""... and {len(unreferenced)-9} more""] msg += ( f""There are additional files in the Docker build context that were not referenced by "" f""any `COPY` instruction (this is not an error):\n\n{bullet_list(unreferenced)}\n\n"" ) return msg ","def docker_build_failed(address: Address, context: DockerBuildContext, colors: bool) -> str | None: if not context.copy_source_vs_context_source: return None msg = ( f""Docker build failed for `docker_image` {address}. The {context.dockerfile} have `COPY` "" ""instructions where the source files may not have been found in the Docker build context."" ""\n\n"" ) renames = [ format_rename_suggestion(src, dst, colors=colors) for src, dst in context.copy_source_vs_context_source if src and dst ] if renames: msg += ( f""However there are possible matches. Please review the following list of suggested "" f""renames:\n\n{bullet_list(renames)}\n\n"" ) unknown = [src for src, dst in context.copy_source_vs_context_source if not dst] if unknown: msg += ( f""The following files where not found in the Docker build context:\n\n"" f""{bullet_list(unknown)}\n\n"" ) unreferenced = [dst for src, dst in context.copy_source_vs_context_source if not src] if unreferenced: if len(unreferenced) > 10: unreferenced = unreferenced[:9] + [f""... and {len(unreferenced)-9} more""] msg += ( f""There are additional files in the Docker build context that were not referenced by "" f""any `COPY` instruction (this is not an error):\n\n{bullet_list(unreferenced)}\n\n"" ) return msg " 913,"def multiset_derangements(s): """"""Generate derangements of the elements of s *in place*. 
Examples ======== >>> from sympy.utilities.iterables import multiset_derangements, uniq Because the derangements of multisets (not sets) are generated in place, copies of the return value must be made if a collection of derangements is desired or else all values will be the same: >>> list(uniq([i for i in multiset_derangements('1233')])) [['3', '3', '2', '1']] >>> [i.copy() for i in multiset_derangements('1233')] [['3', '3', '1', '2'], ['3', '3', '2', '1']] """""" ms = multiset(s) mx = max(ms.values()) n = len(s) # impossible case if mx*2 > n: return rv = [None]*n # special cases # 1) singletons if len(ms) == n: for p in generate_derangements(s): yield p return # 2) aaabbb-like if len(ms) == 2 and len(set(ms.values())) == 1: x, y = list(ms) yield [x if i == y else y for i in s] return for M in ms: if ms[M] == mx: break inonM = [i for i in range(n) if s[i] != M] iM = [i for i in range(n) if s[i] == M] # 3) half are the same if 2*mx == n: for i in inonM: rv[i] = M for p in multiset_permutations([s[i] for i in inonM]): for i, pi in zip(iM, p): rv[i] = pi yield rv return # 4) single repeat covers all but 1 of the non-repeats if n - 2*mx == 1: for i in range(len(inonM)): i1 = inonM[i] ifill = inonM[:i] + inonM[i+1:] for j in ifill: rv[j] = M rv[i1] = s[i1] for p in permutations([s[j] for j in ifill]): for j, pi in zip(iM, p): rv[j] = pi for j in iM: rv[j], rv[i1] = rv[i1], rv[j] yield rv i1 = j return def iopen(v): return [i for i in range(n) if rv[i] is None and s[i] != v] def do(j): if j == -1: yield rv else: M, mx = take[j] for i in subsets(iopen(M), mx): for ii in i: rv[ii] = M yield from do(j - 1) for ii in i: rv[ii] = None take = sorted(ms.items(), key=lambda x:(x[1], x[0])) return do(len(take) - 1) ","def multiset_derangements(s): """"""Generate derangements of the elements of s *in place*. 
Examples ======== >>> from sympy.utilities.iterables import multiset_derangements, uniq Because the derangements of multisets (not sets) are generated in place, copies of the return value must be made if a collection of derangements is desired or else all values will be the same: >>> list(uniq([i for i in multiset_derangements('1233')])) [['3', '3', '2', '1']] >>> [i.copy() for i in multiset_derangements('1233')] [['3', '3', '1', '2'], ['3', '3', '2', '1']] """""" ms = multiset(s) mx = max(ms.values()) n = len(s) # impossible case if mx*2 > n: return rv = [None]*n # special cases # 1) singletons if len(ms) == n: for p in generate_derangements(s): yield p return # 2) aaabbb-like if len(ms) == 2 and len(set(ms.values())) == 1: x, y = list(ms) yield [x if i == y else y for i in s] return for M in ms: if ms[M] == mx: break inonM = [i for i in range(n) if s[i] != M] iM = [i for i in range(n) if s[i] == M] # 3) half are the same if 2*mx == n: for i in inonM: rv[i] = M for p in multiset_permutations([s[i] for i in inonM]): for i, pi in zip(iM, p): rv[i] = pi yield rv return # 4) single repeat covers all but 1 of the non-repeats if n - 2*mx == 1: for i in range(len(inonM)): i1 = inonM[i] ifill = inonM[:i] + inonM[i+1:] for j in ifill: rv[j] = M rv[i1] = s[i1] for p in permutations([s[j] for j in ifill]): for j, pi in zip(iM, p): rv[j] = pi for j in iM: rv[j], rv[i1] = rv[i1], rv[j] yield rv i1 = j return def iopen(v): return [i for i in range(n) if rv[i] is None and s[i] != v] def do(j): if j == -1: yield rv else: M, mx = take[j] for i in subsets(iopen(M), mx): for ii in i: rv[ii] = M yield from do(j - 1) for ii in i: rv[ii] = None take = sorted(ms.items(), key=lambda x:(x[1], x[0])) yield from do(len(take) - 1) " 31596,"def query_incident_cmd(): result = query_incident(demisto.args()[""number""], workflow_query=True) # Create minimal signature list data = result.get(""SignatureList"", {}).get(""Signature"", []) if not isinstance(data, list): data = [data] sigs = [] for sig in data: sig_dict = dict() # type: Dict[str, Any] sig_dict[""SourceIPString""] = sig[""SourceIPString""] sig_dict[""SignatureName""] = sig[""SignatureName""] sig_dict[""VendorSignature""] = sig[""VendorSignature""] sig_dict[""NumberBlocked""] = sig[""NumberBlocked""] sig_dict[""NumberNotBlocked""] = sig[""NumberNotBlocked""] sigs.append(sig_dict) # Set Human readable flatten_relevant_fields = [{ ""Incident Number"": result.get(""IncidentNumber"", """"), ""Time Created"": result.get(""TimeCreated"", """"), ""Status"": result.get(""WorkFlowDetail"", {}).get(""Status"", """"), ""Classification"": result.get(""Classification"", """"), ""Assigned Person"": result.get(""WorkFlowDetail"", {}).get(""AssignedPerson"", """") if result.get(""WorkFlowDetail"", {}) else """", ""Description"": result.get(""Description"", """"), ""Analyst Assessment"": result.get(""AnalystAssessment"", """"), ""Number of Analyzed Signatures"": result.get(""NumberOfAnalyzedSignatures"", """"), ""Signaturtes"": json.dumps(sigs) or """", ""Related Incidents"": json.dumps(result.get(""RelatedIncidents"", {}).get(""IncidentNumber"", """")) if result.get(""RelatedIncidents"", {}) else """", ""Comment"": result.get(""IncidentComments"", {}).get(""IncidentComment"", {}).get(""Comment"", """") if result.get(""IncidentComments"", {}) else """" }] headers = [ ""Incident Number"", ""Time Created"", ""Status"", ""Classification"", ""Assigned Person"", ""Description"", ""Analyst Assessment"", ""Number of Analyzed Signatures"", ""Signaturtes"", ""Related Incidents"", 
""Comment"" ] hr = tableToMarkdown(""Incident query"", flatten_relevant_fields, headers) # Set context result_ctx = { ""IncidentNumber"": result.get(""IncidentNumber"", """"), ""NumberOfAnalyzedSignatures"": result.get(""NumberOfAnalyzedSignatures"", """"), ""SignatureList"": { ""Signature"": sigs }, ""TimeCreated"": result.get(""TimeCreated"", """"), ""Classification"": result.get(""Classification"", """"), ""Description"": result.get(""Description"", """"), ""AnalystAssessment"": result.get(""AnalystAssessment"", """"), ""CountryCode"": result.get(""CountryCode"", """"), ""CountryName"": result.get(""CountryName"", """"), ""RelatedTickets"": result.get(""RelatedTickets"", """"), ""WorkFlowDetail"": { ""Status"": result.get(""WorkFlowDetail"", {}).get(""Status"", """"), ""AssignedPerson"": result.get(""WorkFlowDetail"", {}).get(""AssignedPerson"", """") }, ""RelatedIncidents"": { ""IncidentNumber"": result[""RelatedIncidents""][""IncidentNumber""] if result.get(""RelatedIncidents"") else """" } } if result.get('IncidentComments') and result.get('IncidentComments').get('IncidentComment'): result_ctx[""IncidentComments""] = {""IncidentComment"": { ""CommentedTimeStampGMT"": result[""IncidentComments""][""IncidentComment""][""CommentedTimeStampGMT""], ""Comment"": result[""IncidentComments""][""IncidentComment""][""Comment""], ""CommentedBy"": result[""IncidentComments""][""IncidentComment""][""CommentedBy""] } } else: result_ctx[""IncidentComments""] = {} if result.get(""IncidentAttachmentItems"") and result.get('IncidentAttachmentItems').get('IncidentAttachmentItem'): result_ctx['IncidentAttachmentItems'] = {""IncidentAttachmentItem"": { ""AttachmentNumber"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""AttachmentNumber""], ""AttachmentName"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""AttachmentName""], ""UploadDateGMT"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""UploadDateGMT""], ""UploadBy"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""UploadBy""], ""Comment"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""Comment""] } } else: result_ctx['IncidentAttachmentItems'] = {} context = { ""Symantec MSS.Incident query(val.IncidentNumber && val.IncidentNumber === obj.IncidentNumber)"": result_ctx } demisto.results({ ""ContentsFormat"": formats[""json""], ""Type"": entryTypes[""note""], ""Contents"": result, ""EntryContext"": context, ""ReadableContentsFormat"": formats[""markdown""], ""HumanReadable"": hr }) ","def query_incident_cmd(): result = query_incident(demisto.args()[""number""], workflow_query=True) # Create minimal signature list data = result.get(""SignatureList"", {}).get(""Signature"") or [] if not isinstance(data, list): data = [data] sigs = [] for sig in data: sig_dict = dict() # type: Dict[str, Any] sig_dict[""SourceIPString""] = sig[""SourceIPString""] sig_dict[""SignatureName""] = sig[""SignatureName""] sig_dict[""VendorSignature""] = sig[""VendorSignature""] sig_dict[""NumberBlocked""] = sig[""NumberBlocked""] sig_dict[""NumberNotBlocked""] = sig[""NumberNotBlocked""] sigs.append(sig_dict) # Set Human readable flatten_relevant_fields = [{ ""Incident Number"": result.get(""IncidentNumber"", """"), ""Time Created"": result.get(""TimeCreated"", """"), ""Status"": result.get(""WorkFlowDetail"", {}).get(""Status"", """"), ""Classification"": result.get(""Classification"", """"), ""Assigned Person"": result.get(""WorkFlowDetail"", {}).get(""AssignedPerson"", """") if 
result.get(""WorkFlowDetail"", {}) else """", ""Description"": result.get(""Description"", """"), ""Analyst Assessment"": result.get(""AnalystAssessment"", """"), ""Number of Analyzed Signatures"": result.get(""NumberOfAnalyzedSignatures"", """"), ""Signaturtes"": json.dumps(sigs) or """", ""Related Incidents"": json.dumps(result.get(""RelatedIncidents"", {}).get(""IncidentNumber"", """")) if result.get(""RelatedIncidents"", {}) else """", ""Comment"": result.get(""IncidentComments"", {}).get(""IncidentComment"", {}).get(""Comment"", """") if result.get(""IncidentComments"", {}) else """" }] headers = [ ""Incident Number"", ""Time Created"", ""Status"", ""Classification"", ""Assigned Person"", ""Description"", ""Analyst Assessment"", ""Number of Analyzed Signatures"", ""Signaturtes"", ""Related Incidents"", ""Comment"" ] hr = tableToMarkdown(""Incident query"", flatten_relevant_fields, headers) # Set context result_ctx = { ""IncidentNumber"": result.get(""IncidentNumber"", """"), ""NumberOfAnalyzedSignatures"": result.get(""NumberOfAnalyzedSignatures"", """"), ""SignatureList"": { ""Signature"": sigs }, ""TimeCreated"": result.get(""TimeCreated"", """"), ""Classification"": result.get(""Classification"", """"), ""Description"": result.get(""Description"", """"), ""AnalystAssessment"": result.get(""AnalystAssessment"", """"), ""CountryCode"": result.get(""CountryCode"", """"), ""CountryName"": result.get(""CountryName"", """"), ""RelatedTickets"": result.get(""RelatedTickets"", """"), ""WorkFlowDetail"": { ""Status"": result.get(""WorkFlowDetail"", {}).get(""Status"", """"), ""AssignedPerson"": result.get(""WorkFlowDetail"", {}).get(""AssignedPerson"", """") }, ""RelatedIncidents"": { ""IncidentNumber"": result[""RelatedIncidents""][""IncidentNumber""] if result.get(""RelatedIncidents"") else """" } } if result.get('IncidentComments') and result.get('IncidentComments').get('IncidentComment'): result_ctx[""IncidentComments""] = {""IncidentComment"": { ""CommentedTimeStampGMT"": result[""IncidentComments""][""IncidentComment""][""CommentedTimeStampGMT""], ""Comment"": result[""IncidentComments""][""IncidentComment""][""Comment""], ""CommentedBy"": result[""IncidentComments""][""IncidentComment""][""CommentedBy""] } } else: result_ctx[""IncidentComments""] = {} if result.get(""IncidentAttachmentItems"") and result.get('IncidentAttachmentItems').get('IncidentAttachmentItem'): result_ctx['IncidentAttachmentItems'] = {""IncidentAttachmentItem"": { ""AttachmentNumber"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""AttachmentNumber""], ""AttachmentName"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""AttachmentName""], ""UploadDateGMT"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""UploadDateGMT""], ""UploadBy"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""UploadBy""], ""Comment"": result[""IncidentAttachmentItems""][""IncidentAttachmentItem""][""Comment""] } } else: result_ctx['IncidentAttachmentItems'] = {} context = { ""Symantec MSS.Incident query(val.IncidentNumber && val.IncidentNumber === obj.IncidentNumber)"": result_ctx } demisto.results({ ""ContentsFormat"": formats[""json""], ""Type"": entryTypes[""note""], ""Contents"": result, ""EntryContext"": context, ""ReadableContentsFormat"": formats[""markdown""], ""HumanReadable"": hr }) " 10560,"def main(): """"""Main program entry point."""""" # noinspection PyBroadException try: from ssl import OPENSSL_VERSION_INFO version = list(OPENSSL_VERSION_INFO[:3]) 
except Exception: # pylint: disable=broad-except version = None print(json.dumps(dict( version=version, ))) ","def main(): """"""Main program entry point."""""" # noinspection PyBroadException try: from ssl import OPENSSL_VERSION_INFO version = list(OPENSSL_VERSION_INFO[:3]) except Exception: # pylint: disable=broad-except version = None print(json.dumps({""version"": version})) " 30900,"def upload_file_command(): args = demisto.args() entry_id = args.get('entry_id') title = args.get('title') malware_safety_lock = args.get('malware_safety_lock') or 'off' file_category = args.get('file_category') file_info = demisto.getFilePath(entry_id) if not title: title = file_info['name'] params = { 'name': file_info['name'], 'title': title, 'type': file_category, 'malware_locked': malware_locked_to_request_format(malware_safety_lock) } try: shutil.copy(file_info['path'], file_info['name']) except Exception as e: return_error('Failed to prepare file for upload. Error message: {0}'.format(str(e))) try: with open(file_info['name'], 'rb') as f: files = {'file': f} url_suffix = '/attachments' res = tq_request('POST', url_suffix, params, files=files) finally: shutil.rmtree(file_info['name'], ignore_errors=True) data = file_data_to_demisto_format(res['data']) ec = {CONTEXT_PATH['attachment']: data} readable_title = 'Successfully uploaded file {0}.'.format(file_info['name']) readable = build_readable(readable_title, 'attachment', data) return_outputs(readable, ec, res) ","def upload_file_command(): args = demisto.args() entry_id = args.get('entry_id') title = args.get('title') malware_safety_lock = args.get('malware_safety_lock', 'off') file_category = args.get('file_category') file_info = demisto.getFilePath(entry_id) if not title: title = file_info['name'] params = { 'name': file_info['name'], 'title': title, 'type': file_category, 'malware_locked': malware_locked_to_request_format(malware_safety_lock) } try: shutil.copy(file_info['path'], file_info['name']) except Exception as e: return_error('Failed to prepare file for upload. Error message: {0}'.format(str(e))) try: with open(file_info['name'], 'rb') as f: files = {'file': f} url_suffix = '/attachments' res = tq_request('POST', url_suffix, params, files=files) finally: shutil.rmtree(file_info['name'], ignore_errors=True) data = file_data_to_demisto_format(res['data']) ec = {CONTEXT_PATH['attachment']: data} readable_title = 'Successfully uploaded file {0}.'.format(file_info['name']) readable = build_readable(readable_title, 'attachment', data) return_outputs(readable, ec, res) " 22290,"def stream_to_open_named_file(stream, fd, filename, source_encoding=None, source_error='strict', target_encoding=None, target_error='strict'): """"""Writes a stream to the provided file descriptor, returns the file name. 
Closes file descriptor"""""" # signature and behavor is somewhat odd, due to backwards compatibility, but this can/should be done better CHUNK_SIZE = 1048576 try: codecs.lookup(target_encoding) except Exception: target_encoding = util.DEFAULT_ENCODING # utf-8 try_source_encoding = True while True: chunk = stream.read(CHUNK_SIZE) if not chunk: break if source_encoding is not None and try_source_encoding: # If a source encoding is given we use it to convert to the target encoding try: if not isinstance(chunk, text_type): chunk = chunk.decode(source_encoding, source_error) os.write(fd, chunk.encode(target_encoding, target_error)) except UnicodeDecodeError: try_source_encoding = False os.write(fd, chunk) else: # Compressed files must be encoded after they are uncompressed in the upload utility, # while binary files should not be encoded at all. if isinstance(chunk, text_type): chunk = chunk.encode(target_encoding, target_error) os.write(fd, chunk) os.close(fd) return filename ","def stream_to_open_named_file(stream, fd, filename, source_encoding=None, source_error='strict', target_encoding=None, target_error='strict'): """"""Writes a stream to the provided file descriptor, returns the file name. Closes file descriptor"""""" # signature and behavor is somewhat odd, due to backwards compatibility, but this can/should be done better CHUNK_SIZE = 1048576 try: codecs.lookup(target_encoding) except Exception: target_encoding = util.DEFAULT_ENCODING # utf-8 try_source_encoding = True while True: chunk = stream.read(CHUNK_SIZE) if not chunk: break if source_encoding is not None and try_source_encoding: # If a source encoding is given we use it to convert to the target encoding try: if not isinstance(chunk, text_type): chunk = chunk.decode(source_encoding, source_error) os.write(fd, chunk.encode(target_encoding, target_error)) except UnicodeDecodeError: use_source_encoding = False os.write(fd, chunk) else: # Compressed files must be encoded after they are uncompressed in the upload utility, # while binary files should not be encoded at all. if isinstance(chunk, text_type): chunk = chunk.encode(target_encoding, target_error) os.write(fd, chunk) os.close(fd) return filename " 8053,"def rar_renamer(nzo: NzbObject) -> int: """"""Deobfuscate rar file names: Use header and content information to give RAR-files decent names"""""" nzo.status = Status.VERIFYING nzo.set_unpack_info(""Repair"", T(""Trying RAR renamer"")) nzo.set_action_line(T(""Trying RAR renamer""), ""..."") renamed_files = 0 # This is the most important datastructure (in case of mixed obfuscated rarsets) rarvolnr = {} # rarvolnr will contain per rar vol number the rarfilenames and their respective contents (and maybe other characteristics, like filesizes). # for example: rarvolnr[6]['somerandomfilename.rar']={'readme.txt', 'linux.iso'}, # which means 'somerandomfilename.rar' has rarvolnumber 6, and contents 'readme.txt' and 'linux.iso' # if we find a rarfile with rarvolnumber 7, and 'linux.iso' in it, we have a match! 
# The volume number and real extension of a (obfuscated) rar file # so volnrext['dfakjldfalkjdfl.blabla'] = (14, 'part014.rar') or (2, 'r000') # Not really needed, but handy to avoid a second lookup at the renaming volnrext = {} # Scan rar files in workdir, but not subdirs workdir_files = os.listdir(nzo.download_path) for file_to_check in workdir_files: file_to_check = os.path.join(nzo.download_path, file_to_check) # We only want files: if not (os.path.isfile(file_to_check)): continue if rarfile.is_rarfile(file_to_check): # if a rar file is fully encrypted, rarfile.RarFile() will return no contents: fully_encrypted = rarfile.RarFile(file_to_check, single_file_check=True).filelist() == [] if fully_encrypted: logging.info( ""Download %s contains a fully encrypted & obfuscated rar-file %s. SABnzbd cannot deobfuscate those rar-files"", nzo.final_name, file_to_check, ) # bail out return renamed_files # The function will check if it's a RAR-file # We do a sanity-check for the returned number rar_vol, new_extension = rarvolinfo.get_rar_extension(file_to_check) if 0 < rar_vol < 1000: logging.debug(""Detected volume-number %s from RAR-header: %s "", rar_vol, file_to_check) volnrext[file_to_check] = (rar_vol, new_extension) # The files inside rar file rar_contents = rarfile.RarFile( os.path.join(nzo.download_path, file_to_check), single_file_check=True ).filelist() try: rarvolnr[rar_vol] except: # does not yet exist, so create: rarvolnr[rar_vol] = {} rarvolnr[rar_vol][file_to_check] = rar_contents # store them for matching (if needed) else: logging.debug(""No RAR-volume-number found in %s"", file_to_check) logging.debug(""Deobfuscate: rarvolnr is: %s"", rarvolnr) logging.debug(""Deobfuscate: volnrext is: %s"", volnrext) # Could be that there are no rar-files, we stop if not len(rarvolnr): return renamed_files # this can probably done with a max-key-lambda oneliner, but ... how? numberofrarsets = 0 for mykey in rarvolnr.keys(): numberofrarsets = max(numberofrarsets, len(rarvolnr[mykey])) logging.debug(""Number of rarset is %s"", numberofrarsets) if numberofrarsets == 1: # Just one obfuscated rarset ... that's easy logging.debug(""Deobfuscate: Just one obfuscated rarset"") for filename in volnrext: new_rar_name = ""%s.%s"" % (nzo.final_name, volnrext[filename][1]) new_rar_name = os.path.join(nzo.download_path, new_rar_name) new_rar_name = get_unique_filename(new_rar_name) logging.debug(""Deobfuscate: Renaming %s to %s"" % (filename, new_rar_name)) renamer(filename, new_rar_name) renamed_files += 1 return renamed_files # numberofrarsets bigger than 1, so a mixed rar set, so we need pre-checking # Sanity check of the rar set # Get the highest rar part number (that's the upper limit): highest_rar = sorted(rarvolnr.keys())[-1] # A staircase check: number of rarsets should no go up, but stay the same or go down how_many_previous = 1000 # 1000 rarset mixed ... should be enough ... typical is 1, 2 or maybe 3 # Start at part001.rar and go the highest for rar_set_number in range(1, highest_rar + 1): try: how_many_here = len(rarvolnr[rar_set_number]) except: # rarset does not exist at all logging.warning(""rarset %s is missing completely, so I can't deobfuscate."", rar_set_number) return 0 # OK, it exists, now let's check it's not higher if how_many_here > how_many_previous: # this should not happen: higher number of rarset than previous number of rarset logging.warning(""no staircase! 
rarset %s is higher than previous, so I can't deobfuscate."", rar_set_number) return 0 how_many_previous = how_many_here # OK, that looked OK (a declining staircase), so we can safely proceed # More than one obfuscated rarset, so we must do matching based of files inside the rar files # Assign (random) rar set names, first come first serve basis rarsetname = {} # in which rar set it should be, so rar set 'A', or 'B', or ... mychar = ""A"" # First things first: Assigning a rarsetname to the rar file which have volume number 1 for base_obfuscated_filename in rarvolnr[1]: rarsetname[base_obfuscated_filename] = mychar + ""--"" + nzo.final_name mychar = chr(ord(mychar) + 1) logging.debug(""Deobfuscate: rarsetname %s"", rarsetname) # Do the matching, layer by layer (read: rarvolnumber) # So, all rar files with rarvolnr 1, find the contents (files inside the rar), # and match with rarfiles with rarvolnr 2, and put them in the correct rarset. # And so on, until the highest rarvolnr minus 1 matched against highest rarvolnr for n in range(1, len(rarvolnr)): logging.debug(""Deobfuscate: Finding matches between rar sets %s and %s"" % (n, n + 1)) for base_obfuscated_filename in rarvolnr[n]: matchcounter = 0 for next_obfuscated_filename in rarvolnr[n + 1]: # set() method with intersection (less strict): set(rarvolnr[n][base_obfuscated_filename]).intersection(set(rarvolnr[n+1][next_obfuscated_filename])) # check if the last filename inside the existing rar matches with the first filename in the following rar if rarvolnr[n][base_obfuscated_filename][-1] == rarvolnr[n + 1][next_obfuscated_filename][0]: try: rarsetname[next_obfuscated_filename] = rarsetname[base_obfuscated_filename] matchcounter += 1 except KeyError: logging.warning(T(""No matching earlier rar file for %s""), next_obfuscated_filename) if matchcounter > 1: logging.info(""Deobfuscate: more than one match, so risk on false positive matching."") # Do the renaming: for filename in rarsetname: new_rar_name = ""%s.%s"" % (rarsetname[filename], volnrext[filename][1]) new_rar_name = os.path.join(nzo.download_path, new_rar_name) new_rar_name = get_unique_filename(new_rar_name) logging.debug(""Deobfuscate: Renaming %s to %s"" % (filename, new_rar_name)) renamer(filename, new_rar_name) renamed_files += 1 # Done: The obfuscated rar files have now been renamed to regular formatted filenames return renamed_files ","def rar_renamer(nzo: NzbObject) -> int: """"""Deobfuscate rar file names: Use header and content information to give RAR-files decent names"""""" nzo.status = Status.VERIFYING nzo.set_unpack_info(""Repair"", T(""Trying RAR renamer"")) nzo.set_action_line(T(""Trying RAR renamer""), ""..."") renamed_files = 0 # This is the most important datastructure (in case of mixed obfuscated rarsets) rarvolnr = {} # rarvolnr will contain per rar vol number the rarfilenames and their respective contents (and maybe other characteristics, like filesizes). # for example: rarvolnr[6]['somerandomfilename.rar']={'readme.txt', 'linux.iso'}, # which means 'somerandomfilename.rar' has rarvolnumber 6, and contents 'readme.txt' and 'linux.iso' # if we find a rarfile with rarvolnumber 7, and 'linux.iso' in it, we have a match! 
# The volume number and real extension of a (obfuscated) rar file # so volnrext['dfakjldfalkjdfl.blabla'] = (14, 'part014.rar') or (2, 'r000') # Not really needed, but handy to avoid a second lookup at the renaming volnrext = {} # Scan rar files in workdir, but not subdirs workdir_files = os.listdir(nzo.download_path) for file_to_check in workdir_files: file_to_check = os.path.join(nzo.download_path, file_to_check) # We only want files: if not (os.path.isfile(file_to_check)): continue if rarfile.is_rarfile(file_to_check): # if a rar file is fully encrypted, rarfile.RarFile() will return no contents: fully_encrypted = rarfile.RarFile(file_to_check, single_file_check=True).filelist() == [] if fully_encrypted: logging.info( ""Download %s contains a fully encrypted & obfuscated rar-file: %s."", nzo.final_name, file_to_check, ) # bail out return renamed_files # The function will check if it's a RAR-file # We do a sanity-check for the returned number rar_vol, new_extension = rarvolinfo.get_rar_extension(file_to_check) if 0 < rar_vol < 1000: logging.debug(""Detected volume-number %s from RAR-header: %s "", rar_vol, file_to_check) volnrext[file_to_check] = (rar_vol, new_extension) # The files inside rar file rar_contents = rarfile.RarFile( os.path.join(nzo.download_path, file_to_check), single_file_check=True ).filelist() try: rarvolnr[rar_vol] except: # does not yet exist, so create: rarvolnr[rar_vol] = {} rarvolnr[rar_vol][file_to_check] = rar_contents # store them for matching (if needed) else: logging.debug(""No RAR-volume-number found in %s"", file_to_check) logging.debug(""Deobfuscate: rarvolnr is: %s"", rarvolnr) logging.debug(""Deobfuscate: volnrext is: %s"", volnrext) # Could be that there are no rar-files, we stop if not len(rarvolnr): return renamed_files # this can probably done with a max-key-lambda oneliner, but ... how? numberofrarsets = 0 for mykey in rarvolnr.keys(): numberofrarsets = max(numberofrarsets, len(rarvolnr[mykey])) logging.debug(""Number of rarset is %s"", numberofrarsets) if numberofrarsets == 1: # Just one obfuscated rarset ... that's easy logging.debug(""Deobfuscate: Just one obfuscated rarset"") for filename in volnrext: new_rar_name = ""%s.%s"" % (nzo.final_name, volnrext[filename][1]) new_rar_name = os.path.join(nzo.download_path, new_rar_name) new_rar_name = get_unique_filename(new_rar_name) logging.debug(""Deobfuscate: Renaming %s to %s"" % (filename, new_rar_name)) renamer(filename, new_rar_name) renamed_files += 1 return renamed_files # numberofrarsets bigger than 1, so a mixed rar set, so we need pre-checking # Sanity check of the rar set # Get the highest rar part number (that's the upper limit): highest_rar = sorted(rarvolnr.keys())[-1] # A staircase check: number of rarsets should no go up, but stay the same or go down how_many_previous = 1000 # 1000 rarset mixed ... should be enough ... typical is 1, 2 or maybe 3 # Start at part001.rar and go the highest for rar_set_number in range(1, highest_rar + 1): try: how_many_here = len(rarvolnr[rar_set_number]) except: # rarset does not exist at all logging.warning(""rarset %s is missing completely, so I can't deobfuscate."", rar_set_number) return 0 # OK, it exists, now let's check it's not higher if how_many_here > how_many_previous: # this should not happen: higher number of rarset than previous number of rarset logging.warning(""no staircase! 
rarset %s is higher than previous, so I can't deobfuscate."", rar_set_number) return 0 how_many_previous = how_many_here # OK, that looked OK (a declining staircase), so we can safely proceed # More than one obfuscated rarset, so we must do matching based of files inside the rar files # Assign (random) rar set names, first come first serve basis rarsetname = {} # in which rar set it should be, so rar set 'A', or 'B', or ... mychar = ""A"" # First things first: Assigning a rarsetname to the rar file which have volume number 1 for base_obfuscated_filename in rarvolnr[1]: rarsetname[base_obfuscated_filename] = mychar + ""--"" + nzo.final_name mychar = chr(ord(mychar) + 1) logging.debug(""Deobfuscate: rarsetname %s"", rarsetname) # Do the matching, layer by layer (read: rarvolnumber) # So, all rar files with rarvolnr 1, find the contents (files inside the rar), # and match with rarfiles with rarvolnr 2, and put them in the correct rarset. # And so on, until the highest rarvolnr minus 1 matched against highest rarvolnr for n in range(1, len(rarvolnr)): logging.debug(""Deobfuscate: Finding matches between rar sets %s and %s"" % (n, n + 1)) for base_obfuscated_filename in rarvolnr[n]: matchcounter = 0 for next_obfuscated_filename in rarvolnr[n + 1]: # set() method with intersection (less strict): set(rarvolnr[n][base_obfuscated_filename]).intersection(set(rarvolnr[n+1][next_obfuscated_filename])) # check if the last filename inside the existing rar matches with the first filename in the following rar if rarvolnr[n][base_obfuscated_filename][-1] == rarvolnr[n + 1][next_obfuscated_filename][0]: try: rarsetname[next_obfuscated_filename] = rarsetname[base_obfuscated_filename] matchcounter += 1 except KeyError: logging.warning(T(""No matching earlier rar file for %s""), next_obfuscated_filename) if matchcounter > 1: logging.info(""Deobfuscate: more than one match, so risk on false positive matching."") # Do the renaming: for filename in rarsetname: new_rar_name = ""%s.%s"" % (rarsetname[filename], volnrext[filename][1]) new_rar_name = os.path.join(nzo.download_path, new_rar_name) new_rar_name = get_unique_filename(new_rar_name) logging.debug(""Deobfuscate: Renaming %s to %s"" % (filename, new_rar_name)) renamer(filename, new_rar_name) renamed_files += 1 # Done: The obfuscated rar files have now been renamed to regular formatted filenames return renamed_files " 32355,"def malware_query_filter(needs_attention: str, malware_type: str, malware_status: str, time_stamp: str, limit_range: int) -> dict: query = [] if bool(needs_attention) is True: query.append({""fieldName"": ""needsAttention"", ""operator"": ""Is"", ""values"": [bool(needs_attention)]}) if bool(malware_type) is True: types = malware_type.split("","") query.append({""fieldName"": ""type"", ""operator"": ""Equals"", ""values"": types}) if bool(malware_status) is True: is_status = malware_status.split("","") query.append({""fieldName"": ""status"", ""operator"": ""Equals"", ""values"": is_status}) if bool(time_stamp) is True: query.append({""fieldName"": ""timestamp"", ""operator"": ""GreaterThan"", ""values"": [int(time_stamp)]}) response = malware_query(query, limit_range) return response ","def malware_query_filter(needs_attention: str, malware_type: str, malware_status: str, time_stamp: str, limit_range: int) -> dict: query = [] if needs_attention: query.append({""fieldName"": ""needsAttention"", ""operator"": ""Is"", ""values"": [bool(needs_attention)]}) if bool(malware_type) is True: types = malware_type.split("","") 
query.append({""fieldName"": ""type"", ""operator"": ""Equals"", ""values"": types}) if bool(malware_status) is True: is_status = malware_status.split("","") query.append({""fieldName"": ""status"", ""operator"": ""Equals"", ""values"": is_status}) if bool(time_stamp) is True: query.append({""fieldName"": ""timestamp"", ""operator"": ""GreaterThan"", ""values"": [int(time_stamp)]}) response = malware_query(query, limit_range) return response " 31382,"def create_filter_list_assets(asset_type: str, project: str, filter_string: str, active_assets_only: str) -> str: """""" creating common filter query string for ""list findings"" API based on various filter parameter. :param asset_type: type filter :param filter_string: filter dict :param project: project filter :param active_assets_only: lifeCycleState filter :return: filter query string """""" if filter_string is None: filter_string = """" if project: project_list: list = project.split("","") filter_string = add_filter(""resourceProperties.name"", filter_string, project_list) if asset_type: type_list: list = asset_type.split("","") filter_string = add_filter(""securityCenterProperties.resourceType"", filter_string, type_list) if active_assets_only.lower() == ""true"": if filter_string: filter_string += ' AND ' filter_string += 'resourceProperties.lifecycleState=""ACTIVE""' return filter_string ","def create_filter_list_assets(asset_type: str, project: str, filter_string: str, active_assets_only: str) -> str: """""" creating common filter query string for ""list findings"" API based on various filter parameter. :param asset_type: type filter :param filter_string: filter dict :param project: project filter :param active_assets_only: lifeCycleState filter :return: filter query string """""" if filter_string is None: filter_string = """" if project: project_list: list = project.split("","") filter_string = add_filter(""resourceProperties.name"", filter_string, project_list) if asset_type: type_list: list = asset_type.split("","") filter_string = add_filter(""securityCenterProperties.resourceType"", filter_string, type_list) if active_assets_only.lower() == ""true"": filter_string = add_filter('resourceProperties.lifecycleState', filter_string, ['ACTIVE']) return filter_string " 25413,"def is_safe_for_strict_config(value: Any) -> bool: """"""Checks ``value`` agaisn't a stricter ruleset which will be enforced in a future iteration on ``Config``. Parameters ---------- value: Any The object to be checked. Returns ------- bool Whether or not ``value`` respect the stricter data boundaries. """""" return not _is_unsafe_on_strict_config(value) ","def is_safe_for_strict_config(value: Any) -> bool: """"""Checks ``value`` against a stricter ruleset which will be enforced in a future iteration on ``Config``. Parameters ---------- value: Any The object to be checked. Returns ------- bool Whether or not ``value`` respect the stricter data boundaries. """""" return not _is_unsafe_on_strict_config(value) " 31468,"def option_handler(): """"""Validates and parses script arguments. Returns: Namespace: Parsed arguments object. """""" parser = argparse.ArgumentParser(description=""Store packs in cloud storage."") # disable-secrets-detection-start parser.add_argument('-b', '--public_bucket_name', help=""CI public bucket name"", required=True) parser.add_argument('-s', '--service_account', help=(""Path to gcloud service account, is for circleCI usage. 
"" ""For local development use your personal account and "" ""authenticate using Google Cloud SDK by running: "" ""`gcloud auth application-default login` and leave this parameter blank. "" ""For more information go to: "" ""https://googleapis.dev/python/google-api-core/latest/auth.html""), required=False) parser.add_argument('-sb', '--storage_base_path', help=""Storage base path of the bucket."", required=False) # disable-secrets-detection-end return parser.parse_args() ","def option_handler(): """"""Validates and parses script arguments. Returns: Namespace: Parsed arguments object. """""" parser = argparse.ArgumentParser(description=""Store packs in cloud storage."") # disable-secrets-detection-start parser.add_argument('-b', '--public_bucket_name', help=""CI public bucket name"", required=True) parser.add_argument('-s', '--service_account', help=(""Path to gcloud service account, is for circleCI usage. "" ""For local development use your personal account and "" ""authenticate using Google Cloud SDK by running: "" ""`gcloud auth application-default login` and leave this parameter blank. "" ""For more information go to: "" ""https://googleapis.dev/python/google-api-core/latest/auth.html""), required=False) parser.add_argument('-sp', '--storage_base_path', help=""Storage base path of the bucket."", required=False) # disable-secrets-detection-end return parser.parse_args() " 5400,"def dir_exists(path, saltenv=""base""): """""" Return ``True`` if a directory exists in the state tree, ``False`` otherwise. :param str path: The fully qualified path to a directory in the state tree. :param str saltenv: The fileserver environment to search. Default: ``base`` CLI Example: .. code-block:: bash salt '*' slsutil.bir_exists nginx/files """""" _set_context( [CONTEXT_BASE, saltenv, ""dir_list""], __salt__[""cp.list_master_dirs""], [saltenv] ) return path in __context__[CONTEXT_BASE][saltenv][""dir_list""] ","def dir_exists(path, saltenv=""base""): """""" Return ``True`` if a directory exists in the state tree, ``False`` otherwise. :param str path: The fully qualified path to a directory in the state tree. :param str saltenv: The fileserver environment to search. Default: ``base`` CLI Example: .. code-block:: bash salt '*' slsutil.dir_exists nginx/files """""" _set_context( [CONTEXT_BASE, saltenv, ""dir_list""], __salt__[""cp.list_master_dirs""], [saltenv] ) return path in __context__[CONTEXT_BASE][saltenv][""dir_list""] " 32482,"def get_time_range(last_fetch: Union[str, None] = None, fetch_time=FETCH_TIME, time_range_start=TIME_RANGE_START, time_range_end=TIME_RANGE_END, time_field=TIME_FIELD) -> Dict: """""" Creates the time range filter's dictionary based on the last fetch and given params. Args: last_fetch (str): last fetch time stamp fetch_time (str): first fetch time time_range_start (str): start of time range time_range_end (str): end of time range time_field (): Returns: dictionary (Ex. 
{""range"":{'gt': 1000 'lt':1001}}) """""" range_dict = {} if not last_fetch: if time_range_start: start_date = dateparser.parse(time_range_start) elif fetch_time: start_date = dateparser.parse(fetch_time) else: raise DemistoException(""Missing First fetch timestamp and Time Range Start, please provide one of them."") start_time = get_timestamp_first_fetch(start_date) else: start_time = last_fetch range_dict['gt'] = start_time if time_range_end: end_date = dateparser.parse(time_range_end) end_time = get_timestamp_first_fetch(end_date) range_dict['lt'] = end_time return {'range': {time_field: range_dict}} ","def get_time_range(last_fetch: Union[str, None] = None, fetch_time=FETCH_TIME, time_range_start=TIME_RANGE_START, time_range_end=TIME_RANGE_END, time_field=TIME_FIELD) -> Dict: """""" Creates the time range filter's dictionary based on the last fetch and given params. Args: last_fetch (str): last fetch time stamp fetch_time (str): first fetch time time_range_start (str): start of time range time_range_end (str): end of time range time_field (): Returns: dictionary (Ex. {""range"":{'gt': 1000 'lt':1001}}) """""" range_dict = {} if not last_fetch: if time_range_start: start_date = dateparser.parse(time_range_start) elif fetch_time: start_date = dateparser.parse(fetch_time) else: raise DemistoException(""Missing First fetch timestamp or Time Range Start, please provide one of them."") start_time = get_timestamp_first_fetch(start_date) else: start_time = last_fetch range_dict['gt'] = start_time if time_range_end: end_date = dateparser.parse(time_range_end) end_time = get_timestamp_first_fetch(end_date) range_dict['lt'] = end_time return {'range': {time_field: range_dict}} " 44607,"def create_sparse_tensor_features(sparse_tensor, name='adj'): feature_dict = {} val = sparse_tensor.values.numpy() ind = sparse_tensor.indices.numpy() shape = sparse_tensor.dense_shape.numpy() feature_dict[name + '_val'] = tf.train.Feature( float_list=tf.train.FloatList(value=val)) for i in range(ind.shape[-1]): feature_dict[name + '_ind_' + str(i)] = tf.train.Feature( int64_list=tf.train.Int64List(value=ind[:, i])) return feature_dict ","def create_sparse_tensor_features(sparse_tensor, name='adj'): feature_dict = {} val = sparse_tensor.values.numpy() ind = sparse_tensor.indices.numpy() shape = sparse_tensor.dense_shape.numpy() feature_dict['{}_val'.format(name)] = tf.train.Feature( float_list=tf.train.FloatList(value=val)) for i in range(ind.shape[-1]): feature_dict[name + '_ind_' + str(i)] = tf.train.Feature( int64_list=tf.train.Int64List(value=ind[:, i])) return feature_dict " 31689,"def addAttachmentFile_command(client, args): entry_id = demisto.args()['EntryID'] attachmentDesc = args.get('attachmentDesc') sourceFileName = args.get('sourceFileName') ticketId = args.get('ticketId') phaseName = args.get('phaseName') file_path = demisto.getFilePath(entry_id).get('path') f = open(file_path, ""rb"") data = f.read() response = client.service.addAttachmentFile( attachmentDesc=attachmentDesc, sourceFileName=sourceFileName, attachmentData=data, ticketId=ticketId, phaseName=phaseName) attachment = {} attachment['id'] = response attachment['ticketId'] = ticketId attachment['EntryID'] = entry_id command_results = CommandResults( outputs_prefix='Skybox.addAttachmentFile', outputs_key_field='', outputs=attachment, raw_response=attachment ) return command_results ","def addAttachmentFile_command(client, args): entry_id = args.get('EntryID') attachmentDesc = args.get('attachmentDesc') sourceFileName = 
args.get('sourceFileName') ticketId = args.get('ticketId') phaseName = args.get('phaseName') file_path = demisto.getFilePath(entry_id).get('path') f = open(file_path, ""rb"") data = f.read() response = client.service.addAttachmentFile( attachmentDesc=attachmentDesc, sourceFileName=sourceFileName, attachmentData=data, ticketId=ticketId, phaseName=phaseName) attachment = {} attachment['id'] = response attachment['ticketId'] = ticketId attachment['EntryID'] = entry_id command_results = CommandResults( outputs_prefix='Skybox.addAttachmentFile', outputs_key_field='', outputs=attachment, raw_response=attachment ) return command_results " 4397,"def _check_dict_keys(user_dict, valid_keys, dict_name=""Channel name(s)"", valid_name=""info""): """"""Check that the keys in dictionary are valid against a set list. Return the input dictionary if it is valid, otherwise raise a ValueError with a readable error message. Parameters ---------- user_dict : dict The name of the parameter to check. This is used in the error message. valid_keys : list All possible valid key names. Raises ------ ValueError When the key of the dict is not one of the valid options. Returns ------- user_dict When the keys are deemed acceptable the dictionary is returned. """""" sorted_dict = sorted(list(user_dict)) missing = [val not in valid_keys for val in sorted_dict] if any(missing): raise ValueError( f""{dict_name} is missing from {valid_name}: "" f""{np.array(sorted_dict)[np.array(missing)]}"") return user_dict ","def _check_dict_keys(user_dict, valid_keys, dict_name=""Channel name(s)"", valid_name=""info""): """"""Check that the keys in dictionary are valid against a set list. Return the input dictionary if it is valid, otherwise raise a ValueError with a readable error message. Parameters ---------- user_dict : dict The name of the parameter to check. This is used in the error message. valid_keys : list All possible valid key names. Raises ------ ValueError When the key of the dict is not one of the valid options. Returns ------- user_dict When the keys are deemed acceptable the dictionary is returned. """""" sorted_dict = sorted(list(user_dict)) missing = [val not in valid_keys for val in sorted_dict] if len(missing): raise ValueError( f""{dict_name} is missing from {valid_name}: "" f""{np.array(sorted_dict)[np.array(missing)]}"") return user_dict " 24723,"def sync_send_wrapper(wrapped, instance, args, kwargs): transaction = current_transaction() request = bind_request(*args, **kwargs) connection = instance with ExternalTrace(""httpx"", str(request.url), request.method) as tracer: # Add the tracer to the connection object. The tracer will be # used in getresponse() to add back into the external trace, # after the trace has already completed, details from the # response headers. if hasattr(tracer, ""generate_request_headers""): outgoing_headers = tracer.generate_request_headers(transaction) for header_name, header_value in outgoing_headers: # User headers should override our CAT headers if not header_name in request.headers: request.headers[header_name] = header_value connection._nr_external_tracer = tracer return wrapped(*args, **kwargs) ","def sync_send_wrapper(wrapped, instance, args, kwargs): transaction = current_transaction() request = bind_request(*args, **kwargs) connection = instance with ExternalTrace(""httpx"", str(request.url), request.method) as tracer: # Add the tracer to the connection object. 
The tracer will be # used in getresponse() to add back into the external trace, # after the trace has already completed, details from the # response headers. if hasattr(tracer, ""generate_request_headers""): outgoing_headers = tracer.generate_request_headers(transaction) for header_name, header_value in outgoing_headers: # User headers should override our CAT headers if header_name not in request.headers: request.headers[header_name] = header_value connection._nr_external_tracer = tracer return wrapped(*args, **kwargs) " 10776,"def push_call_vars(blocks, saved_globals, saved_getattrs, typemap, nested=False): """"""push call variables to right before their call site. assuming one global/getattr is created for each call site and control flow doesn't change it. """""" for block in blocks.values(): new_body = [] # global/attr variables that are defined in this block already, # no need to reassign them block_defs = set() # Some definitions are copied right before the call but then we # need to rename that symbol in that block so that typing won't # generate an error trying to lock the save var twice. # In rename_dict, we collect the symbols that must be renamed in # this block. We collect them then apply the renaming at the end. rename_dict = {} for stmt in block.body: def process_assign(stmt): if isinstance(stmt, ir.Assign): rhs = stmt.value lhs = stmt.target if (isinstance(rhs, ir.Global)): saved_globals[lhs.name] = stmt block_defs.add(lhs.name) elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr': if (rhs.value.name in saved_globals or rhs.value.name in saved_getattrs): saved_getattrs[lhs.name] = stmt block_defs.add(lhs.name) if not nested and isinstance(stmt, Parfor): for s in stmt.init_block.body: process_assign(s) pblocks = stmt.loop_body.copy() push_call_vars(pblocks, saved_globals, saved_getattrs, typemap, nested=True) new_body.append(stmt) continue else: process_assign(stmt) for v in stmt.list_vars(): new_body += _get_saved_call_nodes(v.name, saved_globals, saved_getattrs, block_defs, rename_dict) new_body.append(stmt) block.body = new_body # If there is anything to rename then apply the renaming here. if len(rename_dict) > 0: # Fix-up the typing for the renamed vars. for k,v in rename_dict.items(): typemap[v] = typemap[k] # This is only to call replace_var_names which takes a dict. temp_blocks = {0:block} replace_var_names(temp_blocks, rename_dict) return ","def push_call_vars(blocks, saved_globals, saved_getattrs, typemap, nested=False): """"""push call variables to right before their call site. assuming one global/getattr is created for each call site and control flow doesn't change it. """""" for block in blocks.values(): new_body = [] # global/attr variables that are defined in this block already, # no need to reassign them block_defs = set() # Some definitions are copied right before the call but then we # need to rename that symbol in that block so that typing won't # generate an error trying to lock the save var twice. # In rename_dict, we collect the symbols that must be renamed in # this block. We collect them then apply the renaming at the end. 
rename_dict = {} for stmt in block.body: def process_assign(stmt): if isinstance(stmt, ir.Assign): rhs = stmt.value lhs = stmt.target if (isinstance(rhs, ir.Global)): saved_globals[lhs.name] = stmt block_defs.add(lhs.name) elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr': if (rhs.value.name in saved_globals or rhs.value.name in saved_getattrs): saved_getattrs[lhs.name] = stmt block_defs.add(lhs.name) if not nested and isinstance(stmt, Parfor): for s in stmt.init_block.body: process_assign(s) pblocks = stmt.loop_body.copy() push_call_vars(pblocks, saved_globals, saved_getattrs, typemap, nested=True) new_body.append(stmt) continue else: process_assign(stmt) for v in stmt.list_vars(): new_body += _get_saved_call_nodes(v.name, saved_globals, saved_getattrs, block_defs, rename_dict) new_body.append(stmt) block.body = new_body # If there is anything to rename then apply the renaming here. if len(rename_dict) > 0: # Fix-up the typing for the renamed vars. for k, v in rename_dict.items(): typemap[v] = typemap[k] # This is only to call replace_var_names which takes a dict. temp_blocks = {0:block} replace_var_names(temp_blocks, rename_dict) return " 58862,"def format_float_positional(a, *args, **kwargs): """"""Returns the decimal string in positional notation .. seealso:: :func:`numpy.format_float_positional` """""" return numpy.format_float_positional(cupy.asnumpy(a), *args, **kwargs) ","def format_float_positional(x, *args, **kwargs): """"""Format a floating-point scalar as a decimal string in positional notation. See :func:`numpy.format_float_positional` for the list of arguments. .. seealso:: :func:`numpy.format_float_positional` """""" return numpy.format_float_positional(cupy.asnumpy(a), *args, **kwargs) " 32111,"def create_intel_command(client: Client, args: Dict[str, Any]) -> Dict: """""" create_intel command: Creates Intel in CTIX """""" data = { ""ips"": args.get(""ips"", []), ""urls"": args.get(""urls"", []), ""domains"": args.get(""domains"", []), ""files"": args.get(""files"", []), ""emails"": args.get(""emails"", []), ""malwares"": args.get(""malwares"", []), ""threat_actors"": args.get(""threat_actors"", []), ""attack_patterns"": args.get(""attack_patterns"", []), ""title"": args.get(""title"", None), ""description"": args.get(""description"", None), ""confidence"": args.get(""confidence"", None), ""tlp"": args.get(""tlp"", None), } create_intel_response = client.create_intel(data) return create_intel_response ","def create_intel_command(client: Client, args: Dict[str, Any]) -> Dict: """""" create_intel command: Creates Intel in CTIX """""" data = { ""ips"": args.get(""ips"", []), ""urls"": args.get(""urls"", []), ""domains"": args.get(""domains"", []), ""files"": args.get(""files"", []), ""emails"": args.get(""emails"", []), ""malwares"": args.get(""malwares"", []), ""threat_actors"": args.get(""threat_actors"", []), ""attack_patterns"": args.get(""attack_patterns"", []), ""title"": args.get(""title"", None), ""description"": args.get(""description"", None), ""confidence"": args.get(""confidence""), ""tlp"": args.get(""tlp"", None), } create_intel_response = client.create_intel(data) return create_intel_response " 39912,"def test_ursula_init_with_local_keystore_signer(click_runner, custom_filepath, custom_config_filepath, mocker, mock_testerchain, worker_account, test_registry_source_manager): # Good signer... 
pre_config_signer = KeystoreSigner.from_signer_uri(uri=MOCK_SIGNER_URI) init_args = ('ursula', 'init', '--network', TEMPORARY_DOMAIN, '--worker-address', worker_account.address, '--config-root', custom_filepath, '--provider', TEST_PROVIDER_URI, '--rest-host', MOCK_IP_ADDRESS, '--rest-port', MOCK_URSULA_STARTING_PORT, # The bit were' testing here '--signer', MOCK_SIGNER_URI) result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=CLI_ENV) assert result.exit_code == 0, result.stdout # Inspect the configuration file for the signer URI with open(custom_config_filepath, 'r') as config_file: raw_config_data = config_file.read() config_data = json.loads(raw_config_data) assert config_data['signer_uri'] == MOCK_SIGNER_URI,\ ""Keystore URI was not correctly included in configuration file"" # Recreate a configuration with the signer URI preserved ursula_config = UrsulaConfiguration.from_configuration_file(custom_config_filepath) assert ursula_config.signer_uri == MOCK_SIGNER_URI # Mock decryption of web3 client keyring mocker.patch.object(Account, 'decrypt', return_value=worker_account.privateKey) ursula_config.attach_keyring(checksum_address=worker_account.address) ursula_config.keyring.unlock(password=INSECURE_DEVELOPMENT_PASSWORD) # Produce an ursula with a Keystore signer correctly derived from the signer URI, and dont do anything else! mocker.patch.object(StakeList, 'refresh', autospec=True) ursula = ursula_config.produce(client_password=INSECURE_DEVELOPMENT_PASSWORD, block_until_ready=False) # Verify the keystore path is still preserved assert isinstance(ursula.signer, KeystoreSigner) assert isinstance(ursula.signer.path, Path), ""Use Pathlib"" assert ursula.signer.path == Path(MOCK_KEYSTORE_PATH) # confirm Pathlib is used internally despite string input # Show that we can produce the exact same signer as pre-config... assert pre_config_signer.path == ursula.signer.path ","def test_ursula_init_with_local_keystore_signer(click_runner, custom_filepath, custom_config_filepath, mocker, mock_testerchain, worker_account, test_registry_source_manager): # Good signer... pre_config_signer = KeystoreSigner.from_signer_uri(uri=MOCK_SIGNER_URI) init_args = ('ursula', 'init', '--network', TEMPORARY_DOMAIN, '--worker-address', worker_account.address, '--config-root', custom_filepath, '--provider', TEST_PROVIDER_URI, '--rest-host', MOCK_IP_ADDRESS, '--rest-port', MOCK_URSULA_STARTING_PORT, # The bit we're testing here '--signer', MOCK_SIGNER_URI) result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=CLI_ENV) assert result.exit_code == 0, result.stdout # Inspect the configuration file for the signer URI with open(custom_config_filepath, 'r') as config_file: raw_config_data = config_file.read() config_data = json.loads(raw_config_data) assert config_data['signer_uri'] == MOCK_SIGNER_URI,\ ""Keystore URI was not correctly included in configuration file"" # Recreate a configuration with the signer URI preserved ursula_config = UrsulaConfiguration.from_configuration_file(custom_config_filepath) assert ursula_config.signer_uri == MOCK_SIGNER_URI # Mock decryption of web3 client keyring mocker.patch.object(Account, 'decrypt', return_value=worker_account.privateKey) ursula_config.attach_keyring(checksum_address=worker_account.address) ursula_config.keyring.unlock(password=INSECURE_DEVELOPMENT_PASSWORD) # Produce an ursula with a Keystore signer correctly derived from the signer URI, and dont do anything else! 
mocker.patch.object(StakeList, 'refresh', autospec=True) ursula = ursula_config.produce(client_password=INSECURE_DEVELOPMENT_PASSWORD, block_until_ready=False) # Verify the keystore path is still preserved assert isinstance(ursula.signer, KeystoreSigner) assert isinstance(ursula.signer.path, Path), ""Use Pathlib"" assert ursula.signer.path == Path(MOCK_KEYSTORE_PATH) # confirm Pathlib is used internally despite string input # Show that we can produce the exact same signer as pre-config... assert pre_config_signer.path == ursula.signer.path " 44196,"def factorize(two, tol): r""""""Return double-factorized form of a two-electron tensor. The second quantized electronic Hamiltonian is constructed in terms of fermionic creation, :math:`a^{\dagger}` , and annihilation, :math:`a`, operators as [`arXiv:1902.02134 `_] .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha}, where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as .. math:: h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right) \phi_q(r) dr, and .. math:: h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|} dr_1 dr_2. Rearranging the integrals in the chemist notation, [11|22], gives .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}. with .. math:: T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}. and :math:`V` is the two-electron tensor in chemist notation. The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that .. math:: V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}. with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized and truncated in a second level of factorization. The algorithm has the following steps [`arXiv:1902.02134 `_]. 1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \ :math:`n^2 \times n^2` matrix where n is the number of orbitals. 2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \ corresponding eigenvalues larger than a threshold. 3. Reshape the selected eigenvectors to :math:`n \times n` matrices. 4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \ eigenvalues is larger than a threshold. 
Args: two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis arranged in chemist notation [11|22] tol (float): cutoff value for discarding the negligible factors Returns: tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron tensor, eigenvalues of the generated factors, eigenvectors of the generated factors **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772 >>> mol = qml.qchem.Molecule(symbols, geometry) >>> core, one, two = qml.qchem.electron_integrals(mol)() >>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation >>> l, w, v = factorize(two, 1e-5) >>> print(l) [[[ 1.06723440e-01 9.73575768e-15] [ 8.36288956e-15 -1.04898533e-01]] [[-2.20945401e-13 -4.25688222e-01] [-4.25688222e-01 -2.98228790e-13]] [[-8.14472856e-01 5.01669019e-13] [ 5.01689072e-13 -8.28642140e-01]]] """""" n = two.shape[0] two = two.reshape(n * n, n * n) eigvals, eigvecs = np.linalg.eigh(two) eigvals = np.array([val for val in eigvals if abs(val) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals))) factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))]) eigvals, eigvecs = np.linalg.eigh(factors) eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] return factors, eigvals, eigvecs ","def factorize(two, tol): r""""""Return double-factorized form of a two-electron tensor. The second quantized electronic Hamiltonian is constructed in terms of fermionic creation, :math:`a^{\dagger}` , and annihilation, :math:`a`, operators as [`arXiv:1902.02134 `_] .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha}, where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as .. math:: h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right) \phi_q(r) dr, and .. math:: h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|} dr_1 dr_2. Rearranging the integrals in the chemist notation, [11|22], gives .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}. with .. math:: T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}. and :math:`V` is the two-electron tensor in chemist notation. The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that .. math:: V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}. with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized and truncated in a second level of factorization. The algorithm has the following steps [`arXiv:1902.02134 `_]. 1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \ :math:`n^2 \times n^2` matrix where n is the number of orbitals. 2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \ corresponding eigenvalues larger than a threshold. 3. Reshape the selected eigenvectors to :math:`n \times n` matrices. 4. 
Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \ eigenvalues is larger than a threshold. Args: two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis arranged in chemist notation [11|22] tol (float): cutoff value for discarding the negligible factors Returns: tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron tensor, eigenvalues of the generated factors, eigenvectors of the generated factors **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772 >>> mol = qml.qchem.Molecule(symbols, geometry) >>> core, one, two = qml.qchem.electron_integrals(mol)() >>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation >>> l, w, v = factorize(two, 1e-5) >>> print(l) [[[ 1.06723440e-01 9.73575768e-15] [ 8.36288956e-15 -1.04898533e-01]] [[-2.20945401e-13 -4.25688222e-01] [-4.25688222e-01 -2.98228790e-13]] [[-8.14472856e-01 5.01669019e-13] [ 5.01689072e-13 -8.28642140e-01]]] """""" n = two.shape[0] two = two.reshape(n * n, n * n) eigvals, eigvecs = np.linalg.eigh(two) eigvals = np.array([val for val in eigvals if abs(val) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals))) factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))]) eigvals, eigvecs = np.linalg.eigh(factors) eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] return factors, eigvals, eigvecs " 29874,"def run_combiner(sample_paths: List[str], out_file: str, tmp_path: str, intervals: Optional[List[hl.utils.Interval]] = None, header: Optional[str] = None, sample_names: Optional[List[str]] = None, branch_factor: int = CombinerConfig.default_branch_factor, batch_size: int = CombinerConfig.default_batch_size, target_records: int = CombinerConfig.default_target_records, import_interval_size: Optional[int] = None, use_genome_default_intervals: bool = False, use_exome_default_intervals: bool = False, overwrite: bool = False, reference_genome: str = 'default', contig_recoding: Optional[Dict[str, str]] = None, key_by_locus_and_alleles: bool = False): """"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table. **Partitioning** The partitioning of input GVCFs is determined the four parameters below, one of which must be passed to this function: - `intervals` -- User-supplied intervals. - `import_interval_size` -- Use intervals of this uniform size across the genome. - `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs. - `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs. It is recommended that new users include either `use_genome_default_intervals` or `use_exome_default_intervals`. Parameters ---------- sample_paths : :obj:`list` of :obj:`str` Paths to individual GVCFs. out_file : :obj:`str` Path to final combined matrix table. tmp_path : :obj:`str` Path for intermediate output. intervals : list of :class:`.Interval` or None Partitioning with which to import GVCFs in first phase of combiner. header : :obj:`str` or None External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well. sample_names: list of :obj:`str` or None Sample names, to be used with `header`. branch_factor : :obj:`int` Combiner branch factor. 
batch_size : :obj:`int` Combiner batch size. target_records : :obj:`int` Target records per partition in each combiner phase after the first. import_interval_size : :obj:`int` or None The target interval size to partition the reference into intervals for importing GVCFs. use_genome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. If `import_interval_size` is not None, this parameter is ignored. use_exome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. If `import_interval_size` is not None, this parameter is ignored. overwrite : :obj:`bool` Overwrite output file, if it exists. reference_genome : :obj:`str` Reference genome for GVCF import. contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional Mapping from contig name in gVCFs to contig name the reference genome. All contigs must be present in the `reference_genome`, so this is useful for mapping differently-formatted data onto known references. key_by_locus_and_alleles : :obj:`bool` Key by both locus and alleles in the final output. Returns ------- None """""" tmp_path += f'/combiner-temporary/{uuid.uuid4()}/' if header is not None: assert sample_names is not None assert len(sample_names) == len(sample_paths) n_partition_args = (int(intervals is not None) + int(import_interval_size is not None) + int(use_genome_default_intervals) + int(use_exome_default_intervals)) if n_partition_args == 0: raise ValueError(""'run_combiner': require one argument from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning"") if n_partition_args > 0: warning(""'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals'."" ""\n The argument found first in the list in this warning will be used, and others ignored."") if intervals is not None: info(f""Using {len(intervals)} user-supplied intervals as partitioning for GVCF import"") elif import_interval_size is not None: intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size) info(f""Using {len(intervals)} intervals with user-supplied size"" f"" {import_interval_size} as partitioning for GVCF import"") elif use_genome_default_intervals: size = CombinerConfig.default_genome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default whole-genome size"" f"" {import_interval_size} as partitioning for GVCF import"") elif use_exome_default_intervals: size = CombinerConfig.default_exome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default exome size"" f"" {import_interval_size} as partitioning for GVCF import"") assert intervals is not None config = CombinerConfig(branch_factor=branch_factor, batch_size=batch_size, target_records=target_records) plan = config.plan(len(sample_paths)) files_to_merge = sample_paths n_phases = len(plan.phases) total_ops = len(files_to_merge) * n_phases total_work_done = 0 for phase_i, phase in enumerate(plan.phases): phase_i += 1 # used for info messages, 1-indexed for readability n_jobs = len(phase.jobs) merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables' job_str = hl.utils.misc.plural('job', n_jobs) info(f""Starting phase {phase_i}/{n_phases}, merging 
{len(files_to_merge)} {merge_str} in {n_jobs} {job_str}."") if phase_i > 1: intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(), config.target_records, reference_genome=reference_genome) new_files_to_merge = [] for job_i, job in enumerate(phase.jobs): job_i += 1 # used for info messages, 1-indexed for readability n_merges = len(job.merges) merge_str = hl.utils.misc.plural('file', n_merges) pct_total = 100 * job.input_total_size / total_ops info( f""Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O."") merge_mts: List[MatrixTable] = [] for merge in job.merges: inputs = [files_to_merge[i] for i in merge.inputs] if phase_i == 1: mts = [transform_gvcf(vcf) for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False, _external_header=header, _external_sample_ids=[sample_names[i] for i in merge.inputs] if header is not None else None, reference_genome=reference_genome, contig_recoding=contig_recoding)] else: mts = [hl.read_matrix_table(path, _intervals=intervals) for path in inputs] merge_mts.append(combine_gvcfs(mts)) if phase_i == n_phases: # final merge! assert n_jobs == 1 assert len(merge_mts) == 1 [final_mt] = merge_mts if key_by_locus_and_alleles: final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True)) final_mt.write(out_file, overwrite=overwrite) new_files_to_merge = [out_file] info(f""Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished."") break tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/' hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True) pad = len(str(len(merge_mts))) new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts))) total_work_done += job.input_total_size info( f""Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished."") info(f""Finished phase {phase_i}/{n_phases}."") files_to_merge = new_files_to_merge assert files_to_merge == [out_file] info(""Finished!"") ","def run_combiner(sample_paths: List[str], out_file: str, tmp_path: str, intervals: Optional[List[hl.utils.Interval]] = None, header: Optional[str] = None, sample_names: Optional[List[str]] = None, branch_factor: int = CombinerConfig.default_branch_factor, batch_size: int = CombinerConfig.default_batch_size, target_records: int = CombinerConfig.default_target_records, import_interval_size: Optional[int] = None, use_genome_default_intervals: bool = False, use_exome_default_intervals: bool = False, overwrite: bool = False, reference_genome: str = 'default', contig_recoding: Optional[Dict[str, str]] = None, key_by_locus_and_alleles: bool = False): """"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table. **Partitioning** The partitioning of input GVCFs is determined the four parameters below, one of which must be passed to this function: - `intervals` -- User-supplied intervals. - `import_interval_size` -- Use intervals of this uniform size across the genome. - `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs. - `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs. It is recommended that new users include either `use_genome_default_intervals` or `use_exome_default_intervals`. 
Parameters ---------- sample_paths : :obj:`list` of :obj:`str` Paths to individual GVCFs. out_file : :obj:`str` Path to final combined matrix table. tmp_path : :obj:`str` Path for intermediate output. intervals : list of :class:`.Interval` or None Partitioning with which to import GVCFs in first phase of combiner. header : :obj:`str` or None External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well. sample_names: list of :obj:`str` or None Sample names, to be used with `header`. branch_factor : :obj:`int` Combiner branch factor. batch_size : :obj:`int` Combiner batch size. target_records : :obj:`int` Target records per partition in each combiner phase after the first. import_interval_size : :obj:`int` or None The target interval size to partition the reference into intervals for importing GVCFs. use_genome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. If `import_interval_size` is not None, this parameter is ignored. use_exome_default_intervals : :obj:`bool` The input GVCFs are genomes, if this is false, they are assumed to be exomes. If `import_interval_size` is not None, this parameter is ignored. overwrite : :obj:`bool` Overwrite output file, if it exists. reference_genome : :obj:`str` Reference genome for GVCF import. contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional Mapping from contig name in gVCFs to contig name the reference genome. All contigs must be present in the `reference_genome`, so this is useful for mapping differently-formatted data onto known references. key_by_locus_and_alleles : :obj:`bool` Key by both locus and alleles in the final output. Returns ------- None """""" tmp_path += f'/combiner-temporary/{uuid.uuid4()}/' if header is not None: assert sample_names is not None assert len(sample_names) == len(sample_paths) n_partition_args = (int(intervals is not None) + int(import_interval_size is not None) + int(use_genome_default_intervals) + int(use_exome_default_intervals)) if n_partition_args == 0: raise ValueError(""'run_combiner': require one argument from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning"") if n_partition_args > 0: warning(""'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "" ""'use_genome_default_intervals', or 'use_exome_default_intervals'."" ""\n The argument found first in the list in this warning will be used, and others ignored."") if intervals is not None: info(f""Using {len(intervals)} user-supplied intervals as partitioning for GVCF import"") elif import_interval_size is not None: intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size) info(f""Using {len(intervals)} intervals with user-supplied size"" f"" {import_interval_size} as partitioning for GVCF import"") elif use_genome_default_intervals: size = CombinerConfig.default_genome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default whole-genome size"" f"" {size} as partitioning for GVCF import"") elif use_exome_default_intervals: size = CombinerConfig.default_exome_interval_size intervals = calculate_even_genome_partitioning(reference_genome, size) info(f""Using {len(intervals)} intervals with default exome size"" f"" {import_interval_size} as partitioning for GVCF import"") assert intervals is not None config = 
CombinerConfig(branch_factor=branch_factor, batch_size=batch_size, target_records=target_records) plan = config.plan(len(sample_paths)) files_to_merge = sample_paths n_phases = len(plan.phases) total_ops = len(files_to_merge) * n_phases total_work_done = 0 for phase_i, phase in enumerate(plan.phases): phase_i += 1 # used for info messages, 1-indexed for readability n_jobs = len(phase.jobs) merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables' job_str = hl.utils.misc.plural('job', n_jobs) info(f""Starting phase {phase_i}/{n_phases}, merging {len(files_to_merge)} {merge_str} in {n_jobs} {job_str}."") if phase_i > 1: intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(), config.target_records, reference_genome=reference_genome) new_files_to_merge = [] for job_i, job in enumerate(phase.jobs): job_i += 1 # used for info messages, 1-indexed for readability n_merges = len(job.merges) merge_str = hl.utils.misc.plural('file', n_merges) pct_total = 100 * job.input_total_size / total_ops info( f""Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O."") merge_mts: List[MatrixTable] = [] for merge in job.merges: inputs = [files_to_merge[i] for i in merge.inputs] if phase_i == 1: mts = [transform_gvcf(vcf) for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False, _external_header=header, _external_sample_ids=[sample_names[i] for i in merge.inputs] if header is not None else None, reference_genome=reference_genome, contig_recoding=contig_recoding)] else: mts = [hl.read_matrix_table(path, _intervals=intervals) for path in inputs] merge_mts.append(combine_gvcfs(mts)) if phase_i == n_phases: # final merge! assert n_jobs == 1 assert len(merge_mts) == 1 [final_mt] = merge_mts if key_by_locus_and_alleles: final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True)) final_mt.write(out_file, overwrite=overwrite) new_files_to_merge = [out_file] info(f""Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished."") break tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/' hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True) pad = len(str(len(merge_mts))) new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts))) total_work_done += job.input_total_size info( f""Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished."") info(f""Finished phase {phase_i}/{n_phases}."") files_to_merge = new_files_to_merge assert files_to_merge == [out_file] info(""Finished!"") " 29781,"def save_converted_files(res, item_dicoms, bids_options, outtype, prefix, outname_bids, overwrite): """"""Copy converted files from tempdir to output directory. Will rename files if necessary. 
Parameters ---------- res : Node Nipype conversion Node with results item_dicoms: list of filenames DICOMs converted bids : list or None If not list save to BIDS List may contain bids specific options prefix : string Returns ------- bids_outfiles Converted BIDS files """""" from nipype.interfaces.base import isdefined prefix_dirname, prefix_basename = op.split(prefix) bids_outfiles = [] res_files = res.outputs.converted_files if not len(res_files): lgr.debug(""DICOMs {} were not converted"".format(item_dicoms)) return if isdefined(res.outputs.bvecs) and isdefined(res.outputs.bvals): if prefix_dirname.endswith('dwi'): outname_bvecs, outname_bvals = prefix + '.bvec', prefix + '.bval' safe_movefile(res.outputs.bvecs, outname_bvecs, overwrite) safe_movefile(res.outputs.bvals, outname_bvals, overwrite) else: os.remove(res.outputs.bvecs) os.remove(res.outputs.bvals) lgr.debug(""%s and %s were removmed"", res.outputs.bvecs, res.outputs.bvals) if isinstance(res_files, list): res_files = sorted(res_files) # we should provide specific handling for fmap, # dwi etc which might spit out multiple files suffixes = ([str(i+1) for i in range(len(res_files))] if (bids_options is not None) else None) if not suffixes: lgr.warning(""Following series files likely have "" ""multiple (%d) volumes (orientations?) "" ""generated: %s ..."", len(res_files), item_dicoms[0]) suffixes = [str(-i-1) for i in range(len(res_files))] # Also copy BIDS files although they might need to # be merged/postprocessed later bids_files = (sorted(res.outputs.bids) if len(res.outputs.bids) == len(res_files) else [None] * len(res_files)) # preload since will be used in multiple spots bids_metas = [load_json(b) for b in bids_files if b] ### Do we have a multi-echo series? ### # Some Siemens sequences (e.g. CMRR's MB-EPI) set the label 'TE1', # 'TE2', etc. in the 'ImageType' field. However, other seqs do not # (e.g. MGH ME-MPRAGE). They do set a 'EchoNumber', but not for the # first echo. To compound the problem, the echoes are NOT in order, # so the first NIfTI file does not correspond to echo-1, etc. So, we # need to know, beforehand, whether we are dealing with a multi-echo # series. To do that, the most straightforward way is to read the # echo times for all bids_files and see if they are all the same or not. 
# Collect some metadata across all images echo_times, channel_names, image_types = set(), set(), set() for metadata in bids_metas: if not metadata: continue echo_times.add(metadata.get('EchoTime', nan)) channel_names.add(metadata.get('CoilString', nan)) image_types.update(metadata.get('ImageType', [nan])) is_multiecho = len(set(filter(bool, echo_times))) > 1 # Check for varying echo times is_uncombined = len(set(filter(bool, channel_names))) > 1 # Check for uncombined data is_complex = 'M' in image_types and 'P' in image_types # Determine if data are complex (magnitude + phase) ### Loop through the bids_files, set the output name and save files for fl, suffix, bids_file, bids_meta in zip(res_files, suffixes, bids_files, bids_metas): # TODO: monitor conversion duration # set the prefix basename for this specific file (we'll modify it, # and we don't want to modify it for all the bids_files): this_prefix_basename = prefix_basename # Update name for certain criteria if bids_file: if is_multiecho: this_prefix_basename = update_multiecho_name( bids_meta, this_prefix_basename, echo_times ) if is_complex: this_prefix_basename = update_complex_name( bids_meta, this_prefix_basename, suffix ) if is_uncombined: this_prefix_basename = update_uncombined_name( bids_meta, this_prefix_basename, channel_names ) # Fallback option: # If we have failed to modify this_prefix_basename, because it didn't fall # into any of the options above, just add the suffix at the end: if this_prefix_basename == prefix_basename: this_prefix_basename += suffix # Finally, form the outname by stitching the directory and outtype: outname = op.join(prefix_dirname, this_prefix_basename) outfile = outname + '.' + outtype # Write the files needed: safe_movefile(fl, outfile, overwrite) if bids_file: outname_bids_file = ""%s.json"" % (outname) safe_movefile(bids_file, outname_bids_file, overwrite) bids_outfiles.append(outname_bids_file) # res_files is not a list else: outname = ""{}.{}"".format(prefix, outtype) safe_movefile(res_files, outname, overwrite) if isdefined(res.outputs.bids): try: safe_movefile(res.outputs.bids, outname_bids, overwrite) bids_outfiles.append(outname_bids) except TypeError as exc: ##catch lists raise TypeError(""Multiple BIDS sidecars detected."") return bids_outfiles ","def save_converted_files(res, item_dicoms, bids_options, outtype, prefix, outname_bids, overwrite): """"""Copy converted files from tempdir to output directory. Will rename files if necessary. 
Parameters ---------- res : Node Nipype conversion Node with results item_dicoms: list of filenames DICOMs converted bids : list or None If not list save to BIDS List may contain bids specific options prefix : string Returns ------- bids_outfiles Converted BIDS files """""" from nipype.interfaces.base import isdefined prefix_dirname, prefix_basename = op.split(prefix) bids_outfiles = [] res_files = res.outputs.converted_files if not len(res_files): lgr.debug(""DICOMs {} were not converted"".format(item_dicoms)) return if isdefined(res.outputs.bvecs) and isdefined(res.outputs.bvals): if prefix_dirname.endswith('dwi'): outname_bvecs, outname_bvals = prefix + '.bvec', prefix + '.bval' safe_movefile(res.outputs.bvecs, outname_bvecs, overwrite) safe_movefile(res.outputs.bvals, outname_bvals, overwrite) else: os.remove(res.outputs.bvecs) os.remove(res.outputs.bvals) lgr.debug(""%s and %s were removed since not dwi"", res.outputs.bvecs, res.outputs.bvals) if isinstance(res_files, list): res_files = sorted(res_files) # we should provide specific handling for fmap, # dwi etc which might spit out multiple files suffixes = ([str(i+1) for i in range(len(res_files))] if (bids_options is not None) else None) if not suffixes: lgr.warning(""Following series files likely have "" ""multiple (%d) volumes (orientations?) "" ""generated: %s ..."", len(res_files), item_dicoms[0]) suffixes = [str(-i-1) for i in range(len(res_files))] # Also copy BIDS files although they might need to # be merged/postprocessed later bids_files = (sorted(res.outputs.bids) if len(res.outputs.bids) == len(res_files) else [None] * len(res_files)) # preload since will be used in multiple spots bids_metas = [load_json(b) for b in bids_files if b] ### Do we have a multi-echo series? ### # Some Siemens sequences (e.g. CMRR's MB-EPI) set the label 'TE1', # 'TE2', etc. in the 'ImageType' field. However, other seqs do not # (e.g. MGH ME-MPRAGE). They do set a 'EchoNumber', but not for the # first echo. To compound the problem, the echoes are NOT in order, # so the first NIfTI file does not correspond to echo-1, etc. So, we # need to know, beforehand, whether we are dealing with a multi-echo # series. To do that, the most straightforward way is to read the # echo times for all bids_files and see if they are all the same or not. 
# Collect some metadata across all images echo_times, channel_names, image_types = set(), set(), set() for metadata in bids_metas: if not metadata: continue echo_times.add(metadata.get('EchoTime', nan)) channel_names.add(metadata.get('CoilString', nan)) image_types.update(metadata.get('ImageType', [nan])) is_multiecho = len(set(filter(bool, echo_times))) > 1 # Check for varying echo times is_uncombined = len(set(filter(bool, channel_names))) > 1 # Check for uncombined data is_complex = 'M' in image_types and 'P' in image_types # Determine if data are complex (magnitude + phase) ### Loop through the bids_files, set the output name and save files for fl, suffix, bids_file, bids_meta in zip(res_files, suffixes, bids_files, bids_metas): # TODO: monitor conversion duration # set the prefix basename for this specific file (we'll modify it, # and we don't want to modify it for all the bids_files): this_prefix_basename = prefix_basename # Update name for certain criteria if bids_file: if is_multiecho: this_prefix_basename = update_multiecho_name( bids_meta, this_prefix_basename, echo_times ) if is_complex: this_prefix_basename = update_complex_name( bids_meta, this_prefix_basename, suffix ) if is_uncombined: this_prefix_basename = update_uncombined_name( bids_meta, this_prefix_basename, channel_names ) # Fallback option: # If we have failed to modify this_prefix_basename, because it didn't fall # into any of the options above, just add the suffix at the end: if this_prefix_basename == prefix_basename: this_prefix_basename += suffix # Finally, form the outname by stitching the directory and outtype: outname = op.join(prefix_dirname, this_prefix_basename) outfile = outname + '.' + outtype # Write the files needed: safe_movefile(fl, outfile, overwrite) if bids_file: outname_bids_file = ""%s.json"" % (outname) safe_movefile(bids_file, outname_bids_file, overwrite) bids_outfiles.append(outname_bids_file) # res_files is not a list else: outname = ""{}.{}"".format(prefix, outtype) safe_movefile(res_files, outname, overwrite) if isdefined(res.outputs.bids): try: safe_movefile(res.outputs.bids, outname_bids, overwrite) bids_outfiles.append(outname_bids) except TypeError as exc: ##catch lists raise TypeError(""Multiple BIDS sidecars detected."") return bids_outfiles " 3066,"def pprint_thing( thing: Any, _nest_lvl: int = 0, escape_chars: Optional[EscapeChars] = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: Optional[int] = None, ) -> str: """""" This function is the sanctioned way of converting objects to a string representation and properly handles nested sequences. Parameters ---------- thing : anything to be formatted _nest_lvl : internal use only. pprint_thing() is mutually-recursive with pprint_sequence, this argument is used to keep track of the current nesting level, and limit it. escape_chars : list or dict, optional Characters to escape. 
If a dict is passed the values are the replacements default_escapes : bool, default False Whether the input escape characters replaces or adds to the defaults max_seq_items : int or None, default None Pass through to other pretty printers to limit sequence printing Returns ------- str """""" def as_escaped_string( thing: Any, escape_chars: Optional[EscapeChars] = escape_chars ) -> str: translate = {""\t"": r""\t"", ""\n"": r""\n"", ""\r"": r""\r""} if isinstance(escape_chars, dict): if default_escapes: translate.update(escape_chars) else: translate = escape_chars escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or tuple() result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) return result if hasattr(thing, ""__next__""): return str(thing) elif isinstance(thing, dict) and _nest_lvl < get_option( ""display.pprint_nest_depth"" ): result = _pprint_dict( thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items ) elif is_sequence(thing) and _nest_lvl < get_option(""display.pprint_nest_depth""): result = _pprint_seq( thing, _nest_lvl, escape_chars=escape_chars, quote_strings=quote_strings, max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: result = f""'{as_escaped_string(thing)}'"" else: result = as_escaped_string(thing) return result ","def pprint_thing( thing: Any, _nest_lvl: int = 0, escape_chars: Optional[EscapeChars] = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: Optional[int] = None, ) -> str: """""" This function is the sanctioned way of converting objects to a string representation and properly handles nested sequences. Parameters ---------- thing : anything to be formatted _nest_lvl : internal use only. pprint_thing() is mutually-recursive with pprint_sequence, this argument is used to keep track of the current nesting level, and limit it. escape_chars : list or dict, optional Characters to escape. 
If a dict is passed the values are the replacements default_escapes : bool, default False Whether the input escape characters replaces or adds to the defaults max_seq_items : int or None, default None Pass through to other pretty printers to limit sequence printing Returns ------- str """""" def as_escaped_string( thing: Any, escape_chars: Optional[EscapeChars] = escape_chars ) -> str: translate = {""\t"": r""\t"", ""\n"": r""\n"", ""\r"": r""\r""} if isinstance(escape_chars, dict): if default_escapes: translate.update(escape_chars) else: translate = escape_chars escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or tuple() result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) return result if hasattr(thing, ""__next__""): return str(thing) elif isinstance(thing, dict) and _nest_lvl < get_option( ""display.pprint_nest_depth"" ): result = _pprint_dict( thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items ) elif is_sequence(thing) and _nest_lvl < get_option(""display.pprint_nest_depth""): result = _pprint_seq( thing, _nest_lvl, escape_chars=escape_chars, quote_strings=quote_strings, max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: result = repr(as_escaped_string(thing)) else: result = as_escaped_string(thing) return result " 30192,"def bigquery_etl_copy_deduplicate( task_id, target_project_id, only_tables=None, except_tables=None, parallelism=4, hourly=False, gcp_conn_id=""google_cloud_derived_datasets"", gke_location=""us-central1-a"", gke_cluster_name=""bq-load-gke-1"", gke_namespace=""default"", docker_image=""mozilla/bigquery-etl:latest"", image_pull_policy=""Always"", **kwargs ): """""" Copy a day's data from live ping tables to stable ping tables, deduplicating on document_id. 
:param str task_id: [Required] ID for the task :param str target_project_id: [Required] ID of project where target tables live :param Tuple[str] only_tables: Only process tables matching the given globs of form 'telemetry_live.main_v*' :param Tuple[str] except_tables: Process all tables except those matching the given globs :param int parallelism: Maximum number of queries to execute concurrently :param bool houlry: Deduplicate one hour at a time, rather than for whole days at once :param str gcp_conn_id: Airflow connection id for GCP access :param str gke_location: GKE cluster location :param str gke_cluster_name: GKE cluster name :param str gke_namespace: GKE cluster namespace :param str docker_image: docker image to use :param str image_pull_policy: Kubernetes policy for when to pull docker_image :param Dict[str, Any] kwargs: Additional keyword arguments for GKEPodOperator :return: GKEPodOperator """""" kwargs[""name""] = kwargs.get(""name"", task_id.replace(""_"", ""-"")) table_qualifiers = [] if only_tables: table_qualifiers.append('--only') table_qualifiers += only_tables if except_tables: table_qualifiers.append('--except') table_qualifiers += except_tables return GKEPodOperator( task_id=task_id, gcp_conn_id=gcp_conn_id, project_id=GoogleCloudBaseHook(gcp_conn_id=gcp_conn_id).project_id, location=gke_location, cluster_name=gke_cluster_name, namespace=gke_namespace, image=docker_image, arguments=[""script/copy_deduplicate""] + [""--project-id="" + target_project_id] + [""--date={{ds}}""] + [""--parallelism={}"".format(parallelism)] + ([""--hourly""] if hourly else []) + table_qualifiers, image_pull_policy=image_pull_policy, **kwargs ) ","def bigquery_etl_copy_deduplicate( task_id, target_project_id, only_tables=None, except_tables=None, parallelism=4, hourly=False, gcp_conn_id=""google_cloud_derived_datasets"", gke_location=""us-central1-a"", gke_cluster_name=""bq-load-gke-1"", gke_namespace=""default"", docker_image=""mozilla/bigquery-etl:latest"", image_pull_policy=""Always"", **kwargs ): """""" Copy a day's data from live ping tables to stable ping tables, deduplicating on document_id. 
:param str task_id: [Required] ID for the task :param str target_project_id: [Required] ID of project where target tables live :param Tuple[str] only_tables: Only process tables matching the given globs of form 'telemetry_live.main_v*' :param Tuple[str] except_tables: Process all tables except those matching the given globs :param int parallelism: Maximum number of queries to execute concurrently :param bool hourly: Deduplicate one hour at a time, rather than for whole days at once :param str gcp_conn_id: Airflow connection id for GCP access :param str gke_location: GKE cluster location :param str gke_cluster_name: GKE cluster name :param str gke_namespace: GKE cluster namespace :param str docker_image: docker image to use :param str image_pull_policy: Kubernetes policy for when to pull docker_image :param Dict[str, Any] kwargs: Additional keyword arguments for GKEPodOperator :return: GKEPodOperator """""" kwargs[""name""] = kwargs.get(""name"", task_id.replace(""_"", ""-"")) table_qualifiers = [] if only_tables: table_qualifiers.append('--only') table_qualifiers += only_tables if except_tables: table_qualifiers.append('--except') table_qualifiers += except_tables return GKEPodOperator( task_id=task_id, gcp_conn_id=gcp_conn_id, project_id=GoogleCloudBaseHook(gcp_conn_id=gcp_conn_id).project_id, location=gke_location, cluster_name=gke_cluster_name, namespace=gke_namespace, image=docker_image, arguments=[""script/copy_deduplicate""] + [""--project-id="" + target_project_id] + [""--date={{ds}}""] + [""--parallelism={}"".format(parallelism)] + ([""--hourly""] if hourly else []) + table_qualifiers, image_pull_policy=image_pull_policy, **kwargs ) " 38369,"def validate_image_name(filename, suffix: Optional[str] = None) -> str: """""" Build a valid image filename with a specified extension (default to png). The suffix parameter is ignored if the input filename has a valid extension already. Otherwise, suffix is appended to the filename, replacing any existing extension. """""" name, psuffix = os.path.splitext(filename) if psuffix in SUPPORTED_FORMATS: if suffix is not None: suffix = normalize_extension_string(suffix) if suffix in SUPPORTED_FORMATS and suffix != psuffix: mylog.warning( ""Received two valid image format '%s' and '%s'. "" ""The former is ignored."", psuffix, suffix, ) return f""{name}{suffix}"" return str(filename) if suffix is None: suffix = "".png"" suffix = normalize_extension_string(suffix) if suffix not in SUPPORTED_FORMATS: raise ValueError(""Unsupported file format '{suffix}'."") return f""{filename}{suffix}"" ","def validate_image_name(filename, suffix: Optional[str] = None) -> str: """""" Build a valid image filename with a specified extension (default to png). The suffix parameter is ignored if the input filename has a valid extension already. Otherwise, suffix is appended to the filename, replacing any existing extension. """""" name, psuffix = os.path.splitext(filename) if psuffix in SUPPORTED_FORMATS: if suffix is not None: suffix = normalize_extension_string(suffix) if suffix in SUPPORTED_FORMATS and suffix != psuffix: mylog.warning( ""Received two valid image formats '%s' and '%s'. 
"" ""The former is ignored."", psuffix, suffix, ) return f""{name}{suffix}"" return str(filename) if suffix is None: suffix = "".png"" suffix = normalize_extension_string(suffix) if suffix not in SUPPORTED_FORMATS: raise ValueError(""Unsupported file format '{suffix}'."") return f""{filename}{suffix}"" " 51489,"def _temp_dataarray(ds, y, locals_): """"""Create a temporary datarray with extra coords."""""" from ..core.dataarray import DataArray # Base coords: coords = dict(ds.coords) # Add extra coords to the DataArray from valid kwargs, if using all # kwargs there is a risk that we add unneccessary dataarrays as # coords straining RAM further for example: # ds.both and extend=""both"" would add ds.both to the coords: valid_coord_kwargs = {""x"", ""z"", ""markersize"", ""hue"", ""row"", ""col"", ""u"", ""v""} for k in locals_.keys() & valid_coord_kwargs: key = locals_[k] if ds.data_vars.get(key) is not None: coords[key] = ds[key] # The dataarray has to include all the dims. Broadcast to that shape # and add the additional coords: _y = ds[y].broadcast_like(ds) return DataArray(_y, coords=coords) ","def _temp_dataarray(ds, y, locals_): """"""Create a temporary datarray with extra coords."""""" from ..core.dataarray import DataArray # Base coords: coords = dict(ds.coords) # Add extra coords to the DataArray from valid kwargs, if using all # kwargs there is a risk that we add unneccessary dataarrays as # coords straining RAM further for example: # ds.both and extend=""both"" would add ds.both to the coords: valid_coord_kwargs = {""x"", ""z"", ""markersize"", ""hue"", ""row"", ""col"", ""u"", ""v""} coord_kwargs = locals_.keys() & valid_coord_kwargs for k in coord_kwargs: key = locals_[k] if ds.data_vars.get(key) is not None: coords[key] = ds[key] # The dataarray has to include all the dims. Broadcast to that shape # and add the additional coords: _y = ds[y].broadcast_like(ds) return DataArray(_y, coords=coords) " 43462,"def AmplitudeEmbedding(features, wires, pad): r""""""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits. If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To avail this, the argument ``pad`` should be set to ``True``. The absolute square of all elements in ``features`` has to add up to one. .. note:: AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with devices that implement this function. Args: features (array): Input array of shape ``(2**n,)`` wires (Sequence[int]): sequence of qubit indices that the template acts on pad (Boolean): controls the activation of the padding option """""" if not isinstance(wires, Iterable): raise ValueError(""Wires needs to be a list of wires that the embedding uses; got {}."".format(wires)) if pad==True and 2**len(wires) != len(features): features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant') if pad==False and 2**len(wires) != len(features): raise ValueError(""AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "" ""got {}."".format(2 ** len(wires), len(features))) if np.linalg.norm(features,2) != 1: raise ValueError(""AmplitudeEmbedding requires a normalized feature vector."") QubitStateVector(features, wires=wires) ","def AmplitudeEmbedding(features, wires, pad): r""""""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits. 
If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To avail this, the argument ``pad`` should be set to ``True``. The absolute square of all elements in ``features`` has to add up to one. .. note:: AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with devices that implement this function. Args: features (array): Input array of shape ``(2**n,)`` wires (Sequence[int]): sequence of qubit indices that the template acts on pad (Boolean): controls the activation of the padding option """""" if not isinstance(wires, Iterable): raise ValueError(""Wires needs to be a list of wires that the embedding uses; got {}."".format(wires)) if pad and 2**len(wires) != len(features): features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant') if pad==False and 2**len(wires) != len(features): raise ValueError(""AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "" ""got {}."".format(2 ** len(wires), len(features))) if np.linalg.norm(features,2) != 1: raise ValueError(""AmplitudeEmbedding requires a normalized feature vector."") QubitStateVector(features, wires=wires) " 49579,"def read_pandas( reader, urlpath, blocksize=""default"", lineterminator=None, compression=""infer"", sample=256000, sample_rows=10, enforce=False, assume_missing=False, storage_options=None, include_path_column=False, **kwargs, ): reader_name = reader.__name__ if lineterminator is not None and len(lineterminator) == 1: kwargs[""lineterminator""] = lineterminator else: lineterminator = ""\n"" if include_path_column and isinstance(include_path_column, bool): include_path_column = ""path"" if ""index"" in kwargs or ""index_col"" in kwargs: raise ValueError( ""Keywords 'index' and 'index_col' not supported. "" ""Use dd.{0}(...).set_index('my-index') "" ""instead"".format(reader_name) ) for kw in [""iterator"", ""chunksize""]: if kw in kwargs: raise ValueError(""{0} not supported for dd.{1}"".format(kw, reader_name)) if kwargs.get(""nrows"", None): raise ValueError( ""The 'nrows' keyword is not supported by "" ""`dd.{0}`. To achieve the same behavior, it's "" ""recommended to use `dd.{0}(...)."" ""head(n=nrows)`"".format(reader_name) ) if isinstance(kwargs.get(""skiprows""), int): skiprows = lastskiprow = firstrow = kwargs.get(""skiprows"") elif kwargs.get(""skiprows"") is None: skiprows = lastskiprow = firstrow = 0 else: # When skiprows is a list, we expect more than max(skiprows) to # be included in the sample. This means that [0,2] will work well, # but [0, 440] might not work. skiprows = set(kwargs.get(""skiprows"")) lastskiprow = max(skiprows) # find the firstrow that is not skipped, for use as header firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows)) if isinstance(kwargs.get(""header""), list): raise TypeError( ""List of header rows not supported for dd.{0}"".format(reader_name) ) if isinstance(kwargs.get(""converters""), dict) and include_path_column: path_converter = kwargs.get(""converters"").get(include_path_column, None) else: path_converter = None # If compression is ""infer"", inspect the (first) path suffix and # set the proper compression option if the suffix is recongnized. 
if compression == ""infer"": # Translate the input urlpath to a simple path list paths = get_fs_token_paths(urlpath, mode=""rb"", storage_options=storage_options)[ 2 ] # Infer compression from first path compression = infer_compression(paths[0]) if blocksize == ""default"": blocksize = AUTO_BLOCKSIZE if isinstance(blocksize, str): blocksize = parse_bytes(blocksize) if blocksize and compression: # NONE of the compressions should use chunking warn( ""Warning %s compression does not support breaking apart files\n"" ""Please ensure that each individual file can fit in memory and\n"" ""use the keyword ``blocksize=None to remove this message``\n"" ""Setting ``blocksize=None``"" % compression ) blocksize = None if compression not in compr: raise NotImplementedError(""Compression format %s not installed"" % compression) if blocksize and sample and blocksize < sample and lastskiprow != 0: warn( ""Unexpected behavior can result from passing skiprows when\n"" ""blocksize is smaller than sample size.\n"" ""Setting ``sample=blocksize``"" ) sample = blocksize b_lineterminator = lineterminator.encode() b_out = read_bytes( urlpath, delimiter=b_lineterminator, blocksize=blocksize, sample=sample, compression=compression, include_path=include_path_column, **(storage_options or {}), ) if include_path_column: b_sample, values, paths = b_out path = (include_path_column, path_converter) else: b_sample, values = b_out path = None if not isinstance(values[0], (tuple, list)): values = [values] # If we have not sampled, then use the first row of the first values # as a representative sample. if b_sample is False and len(values[0]): b_sample = values[0][0].compute() # Get header row, and check that sample is long enough. If the file # contains a header row, we need at least 2 nonempty rows + the number of # rows to skip. names = kwargs.get(""names"", None) header = kwargs.get(""header"", ""infer"" if names is None else None) need = 1 if header is None else 2 parts = b_sample.split(b_lineterminator, lastskiprow + need) # If the last partition is empty, don't count it nparts = 0 if not parts else len(parts) - int(not parts[-1]) if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample: raise ValueError( ""Sample is not large enough to include at least one "" ""row of data. Please increase the number of bytes "" ""in `sample` in the call to `read_csv`/`read_table`"" ) header = b"""" if header is None else parts[firstrow] + b_lineterminator # Use sample to infer dtypes and check for presence of include_path_column head_kwargs = kwargs.copy() if sample_rows is not None: head_kwargs[""nrows""] = sample_rows try: head = reader(BytesIO(b_sample), **head_kwargs) except pd.errors.ParserError as e: if ""EOF"" in str(e): raise ValueError( ""EOF encountered while reading header. \n"" ""Pass argument `sample_rows=` and make sure the value of `sample` "" ""is large enough to accommodate that may rows of data"" ) from e raise if include_path_column and (include_path_column in head.columns): raise ValueError( ""Files already contain the column name: %s, so the "" ""path column cannot use this name. 
Please set "" ""`include_path_column` to a unique name."" % include_path_column ) specified_dtypes = kwargs.get(""dtype"", {}) if specified_dtypes is None: specified_dtypes = {} # If specified_dtypes is a single type, then all columns were specified if assume_missing and isinstance(specified_dtypes, dict): # Convert all non-specified integer columns to floats for c in head.columns: if is_integer_dtype(head[c].dtype) and c not in specified_dtypes: head[c] = head[c].astype(float) values = [[list(dsk.dask.values()) for dsk in block] for block in values] return text_blocks_to_pandas( reader, values, header, head, kwargs, enforce=enforce, specified_dtypes=specified_dtypes, path=path, blocksize=blocksize, ) ","def read_pandas( reader, urlpath, blocksize=""default"", lineterminator=None, compression=""infer"", sample=256000, sample_rows=10, enforce=False, assume_missing=False, storage_options=None, include_path_column=False, **kwargs, ): reader_name = reader.__name__ if lineterminator is not None and len(lineterminator) == 1: kwargs[""lineterminator""] = lineterminator else: lineterminator = ""\n"" if include_path_column and isinstance(include_path_column, bool): include_path_column = ""path"" if ""index"" in kwargs or ""index_col"" in kwargs: raise ValueError( ""Keywords 'index' and 'index_col' not supported. "" ""Use dd.{0}(...).set_index('my-index') "" ""instead"".format(reader_name) ) for kw in [""iterator"", ""chunksize""]: if kw in kwargs: raise ValueError(""{0} not supported for dd.{1}"".format(kw, reader_name)) if kwargs.get(""nrows"", None): raise ValueError( ""The 'nrows' keyword is not supported by "" ""`dd.{0}`. To achieve the same behavior, it's "" ""recommended to use `dd.{0}(...)."" ""head(n=nrows)`"".format(reader_name) ) if isinstance(kwargs.get(""skiprows""), int): skiprows = lastskiprow = firstrow = kwargs.get(""skiprows"") elif kwargs.get(""skiprows"") is None: skiprows = lastskiprow = firstrow = 0 else: # When skiprows is a list, we expect more than max(skiprows) to # be included in the sample. This means that [0,2] will work well, # but [0, 440] might not work. skiprows = set(kwargs.get(""skiprows"")) lastskiprow = max(skiprows) # find the firstrow that is not skipped, for use as header firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows)) if isinstance(kwargs.get(""header""), list): raise TypeError( ""List of header rows not supported for dd.{0}"".format(reader_name) ) if isinstance(kwargs.get(""converters""), dict) and include_path_column: path_converter = kwargs.get(""converters"").get(include_path_column, None) else: path_converter = None # If compression is ""infer"", inspect the (first) path suffix and # set the proper compression option if the suffix is recongnized. 
if compression == ""infer"": # Translate the input urlpath to a simple path list paths = get_fs_token_paths(urlpath, mode=""rb"", storage_options=storage_options)[ 2 ] # Infer compression from first path compression = infer_compression(paths[0]) if blocksize == ""default"": blocksize = AUTO_BLOCKSIZE if isinstance(blocksize, str): blocksize = parse_bytes(blocksize) if blocksize and compression: # NONE of the compressions should use chunking warn( ""Warning %s compression does not support breaking apart files\n"" ""Please ensure that each individual file can fit in memory and\n"" ""use the keyword ``blocksize=None to remove this message``\n"" ""Setting ``blocksize=None``"" % compression ) blocksize = None if compression not in compr: raise NotImplementedError(""Compression format %s not installed"" % compression) if blocksize and sample and blocksize < sample and lastskiprow != 0: warn( ""Unexpected behavior can result from passing skiprows when\n"" ""blocksize is smaller than sample size.\n"" ""Setting ``sample=blocksize``"" ) sample = blocksize b_lineterminator = lineterminator.encode() b_out = read_bytes( urlpath, delimiter=b_lineterminator, blocksize=blocksize, sample=sample, compression=compression, include_path=include_path_column, **(storage_options or {}), ) if include_path_column: b_sample, values, paths = b_out path = (include_path_column, path_converter) else: b_sample, values = b_out path = None if not isinstance(values[0], (tuple, list)): values = [values] # If we have not sampled, then use the first row of the first values # as a representative sample. if b_sample is False and len(values[0]): b_sample = values[0][0].compute() # Get header row, and check that sample is long enough. If the file # contains a header row, we need at least 2 nonempty rows + the number of # rows to skip. names = kwargs.get(""names"", None) header = kwargs.get(""header"", ""infer"" if names is None else None) need = 1 if header is None else 2 parts = b_sample.split(b_lineterminator, lastskiprow + need) # If the last partition is empty, don't count it nparts = 0 if not parts else len(parts) - int(not parts[-1]) if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample: raise ValueError( ""Sample is not large enough to include at least one "" ""row of data. Please increase the number of bytes "" ""in `sample` in the call to `read_csv`/`read_table`"" ) header = b"""" if header is None else parts[firstrow] + b_lineterminator # Use sample to infer dtypes and check for presence of include_path_column head_kwargs = kwargs.copy() if sample_rows is not None: head_kwargs[""nrows""] = sample_rows try: head = reader(BytesIO(b_sample), **head_kwargs) except pd.errors.ParserError as e: if ""EOF"" in str(e): raise ValueError( ""EOF encountered while reading header. \n"" ""Pass argument `sample_rows` and make sure the value of `sample` "" ""is large enough to accommodate that many rows of data"" ) from e raise if include_path_column and (include_path_column in head.columns): raise ValueError( ""Files already contain the column name: %s, so the "" ""path column cannot use this name. 
Please set "" ""`include_path_column` to a unique name."" % include_path_column ) specified_dtypes = kwargs.get(""dtype"", {}) if specified_dtypes is None: specified_dtypes = {} # If specified_dtypes is a single type, then all columns were specified if assume_missing and isinstance(specified_dtypes, dict): # Convert all non-specified integer columns to floats for c in head.columns: if is_integer_dtype(head[c].dtype) and c not in specified_dtypes: head[c] = head[c].astype(float) values = [[list(dsk.dask.values()) for dsk in block] for block in values] return text_blocks_to_pandas( reader, values, header, head, kwargs, enforce=enforce, specified_dtypes=specified_dtypes, path=path, blocksize=blocksize, ) " 11918,"def _get_global_header(im, info): """"""Return a list of strings representing a GIF header"""""" # Header Block # https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp version = b""87a"" for extensionKey in [""transparency"", ""duration"", ""loop"", ""comment""]: if info and extensionKey in info: if (extensionKey == ""duration"" and info[extensionKey] == 0) or ( extensionKey == ""comment"" and len(info[extensionKey]) == 0 ): continue version = b""89a"" break else: if im.info.get(""version"") == b""89a"": version = b""89a"" background = _get_background(im, info.get(""background"")) palette_bytes = _get_palette_bytes(im) color_table_size = _get_color_table_size(palette_bytes) return [ b""GIF"" # signature + version # version + o16(im.size[0]) # canvas width + o16(im.size[1]), # canvas height # Logical Screen Descriptor # size of global color table + global color table flag o8(color_table_size + 128), # packed fields # background + reserved/aspect o8(background) + o8(0), # Global Color Table _get_header_palette(palette_bytes), ] ","def _get_global_header(im, info): """"""Return a list of strings representing a GIF header"""""" # Header Block # https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp version = b""87a"" for extensionKey in [""transparency"", ""duration"", ""loop"", ""comment""]: if info and extensionKey in info: if (extensionKey == ""duration"" and info[extensionKey] == 0) or ( extensionKey == ""comment"" and info[extensionKey] ): continue version = b""89a"" break else: if im.info.get(""version"") == b""89a"": version = b""89a"" background = _get_background(im, info.get(""background"")) palette_bytes = _get_palette_bytes(im) color_table_size = _get_color_table_size(palette_bytes) return [ b""GIF"" # signature + version # version + o16(im.size[0]) # canvas width + o16(im.size[1]), # canvas height # Logical Screen Descriptor # size of global color table + global color table flag o8(color_table_size + 128), # packed fields # background + reserved/aspect o8(background) + o8(0), # Global Color Table _get_header_palette(palette_bytes), ] " 58628,"def test_spacy_ner_extractor(spacy_nlp): ext = SpacyEntityExtractor({""dimensions"": [""LOC""]}) example = Message(""anywhere in the West"", { ""intent"": ""restaurant_search"", ""entities"": [], ""spacy_doc"": spacy_nlp(""anywhere in the west"")}) ext.process(example, spacy_nlp=spacy_nlp) assert len(example.get(""entities"", [])) == 1 assert example.get(""entities"")[0] == { 'start': 16, 'extractor': 'ner_spacy', 'end': 20, 'value': 'West', 'entity': 'LOC', 'confidence': None} # Test dimension filtering example = Message(""anywhere in the West"", { ""intent"": ""restaurant_search"", ""entities"": [], ""spacy_doc"": spacy_nlp(""anywhere in the west"")}) ext = SpacyEntityExtractor({""dimensions"": 
[""EVENT""]}) ext.process(example, spacy_nlp=spacy_nlp) assert len(example.get(""entities"", [])) == 0 ","def test_spacy_ner_extractor(spacy_nlp): _config = RasaNLUModelConfig({""pipeline"": [{""name"": ""SpacyEntityExtractor""}]}) ext = component_builder.create_component(_config.for_component(0), _config) example = Message(""anywhere in the West"", { ""intent"": ""restaurant_search"", ""entities"": [], ""spacy_doc"": spacy_nlp(""anywhere in the west"")}) ext.process(example, spacy_nlp=spacy_nlp) assert len(example.get(""entities"", [])) == 1 assert example.get(""entities"")[0] == { 'start': 16, 'extractor': 'ner_spacy', 'end': 20, 'value': 'West', 'entity': 'LOC', 'confidence': None} # Test dimension filtering example = Message(""anywhere in the West"", { ""intent"": ""restaurant_search"", ""entities"": [], ""spacy_doc"": spacy_nlp(""anywhere in the west"")}) ext = SpacyEntityExtractor({""dimensions"": [""EVENT""]}) ext.process(example, spacy_nlp=spacy_nlp) assert len(example.get(""entities"", [])) == 0 " 26727,"def load_entrypoint_plugins(): """""" Load and register plugins AirflowPlugin subclasses from the entrypoints. The entry_point group should be 'airflow.plugins'. """""" global import_errors # pylint: disable=global-statement global plugins # pylint: disable=global-statement entry_points = pkg_resources.iter_entry_points('airflow.plugins') log.debug(""Loading plugins from entrypoints"") for entry_point in entry_points: # pylint: disable=too-many-nested-blocks log.debug('Importing entry_point plugin %s', entry_point.name) try: plugin_class = entry_point.load() if is_valid_plugin(plugin_class): plugin_obj = plugin_class() if callable(getattr(plugin_obj, 'on_load', None)): plugin_obj.on_load() plugins.append(plugin_obj()) except Exception as e: # pylint: disable=broad-except log.exception(""Failed to import plugin %s"", entry_point.name) import_errors[entry_point.module_name] = str(e) ","def load_entrypoint_plugins(): """""" Load and register plugins AirflowPlugin subclasses from the entrypoints. The entry_point group should be 'airflow.plugins'. """""" global import_errors # pylint: disable=global-statement global plugins # pylint: disable=global-statement entry_points = pkg_resources.iter_entry_points('airflow.plugins') log.debug(""Loading plugins from entrypoints"") for entry_point in entry_points: # pylint: disable=too-many-nested-blocks log.debug('Importing entry_point plugin %s', entry_point.name) try: plugin_class = entry_point.load() if is_valid_plugin(plugin_class): plugin_instance = plugin_class() if callable(getattr(plugin_obj, 'on_load', None)): plugin_obj.on_load() plugins.append(plugin_obj()) except Exception as e: # pylint: disable=broad-except log.exception(""Failed to import plugin %s"", entry_point.name) import_errors[entry_point.module_name] = str(e) " 38417,"def get_radius(data, field_prefix, ftype): center = data.get_field_parameter(""center"").to(""code_length"") DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).to(""code_length"") # This is in code_length so it can be the destination for our r later. 
radius2 = data.ds.arr( np.zeros(data[ftype, field_prefix + ""x""].shape, dtype=""float64""), ""code_length"" ) r = radius2.v if any(data.ds.periodicity): rdw = radius2.v for i, ax in enumerate(""xyz""): np.subtract( data[ftype, f""{field_prefix}{ax}""].d, center[i].d, r, ) if data.ds.periodicity[i]: np.abs(r, r) np.subtract(r, DW.d[i], rdw) np.abs(rdw, rdw) np.minimum(r, rdw, r) np.multiply(r, r, r) np.add(radius2.d, r, radius2.d) if data.ds.dimensionality < i + 1: break # Using the views into the array is not changing units and as such keeps # from having to do symbolic manipulations np.sqrt(radius2.d, radius2.d) # Alias it, just for clarity. radius = radius2 return radius ","def get_radius(data, field_prefix, ftype): center = data.get_field_parameter(""center"").to(""code_length"") DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).to(""code_length"") # This is in code_length so it can be the destination for our r later. radius2 = data.ds.arr( np.zeros(data[ftype, field_prefix + ""x""].shape, dtype=""float64""), ""code_length"" ) r = np.empty(radius2.shape, dtype=""float64"") if any(data.ds.periodicity): rdw = radius2.v for i, ax in enumerate(""xyz""): np.subtract( data[ftype, f""{field_prefix}{ax}""].d, center[i].d, r, ) if data.ds.periodicity[i]: np.abs(r, r) np.subtract(r, DW.d[i], rdw) np.abs(rdw, rdw) np.minimum(r, rdw, r) np.multiply(r, r, r) np.add(radius2.d, r, radius2.d) if data.ds.dimensionality < i + 1: break # Using the views into the array is not changing units and as such keeps # from having to do symbolic manipulations np.sqrt(radius2.d, radius2.d) # Alias it, just for clarity. radius = radius2 return radius " 16693,"def _data_schema(schema_input: dict[Any, Any]) -> vol.Schema: """"""Generate schema with defaults."""""" return vol.Schema( { vol.Required(CONF_HOST, default=schema_input.get(CONF_HOST, """")): str, vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str, vol.Optional(CONF_TLS_VER, default=DEFAULT_TLS_VERSION): vol.In([1.1, 1.2]), }, extra=vol.ALLOW_EXTRA, ) ","def _data_schema(schema_input: dict[str, str]) -> vol.Schema: """"""Generate schema with defaults."""""" return vol.Schema( { vol.Required(CONF_HOST, default=schema_input.get(CONF_HOST, """")): str, vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str, vol.Optional(CONF_TLS_VER, default=DEFAULT_TLS_VERSION): vol.In([1.1, 1.2]), }, extra=vol.ALLOW_EXTRA, ) " 27500,"def upload_dags_to_composer(dags_directory: str, bucket_name: str) -> None: temp_dir, dags = _create_dags_list(dags_directory) if len(dags) > 0: # Note - the GCS client library does not currently support batch requests on uploads # if you have a large number of files, consider using # the Python subprocess module to run gsutil -m cp -r on your dags # See https://cloud.google.com/storage/docs/gsutil/commands/cp for more info storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for dag in dags: # Remove path to temp dir dag = dag.replace(f""{temp_dir}/"", ""dags/"") # Upload to your bucket blob = bucket.blob(dag) blob.upload_from_file(dag) print(f""File {dag} uploaded to {bucket_name}/{dag}."") else: print(""No DAGs to upload."") ","def upload_dags_to_composer(dags_directory: str, bucket_name: str) -> None: temp_dir, dags = _create_dags_list(dags_directory) if len(dags) > 0: # Note - the GCS client library does not currently support batch requests on uploads # if you have a large number of files, consider using # the Python subprocess module to run gsutil -m cp -r on your dags # See 
https://cloud.google.com/storage/docs/gsutil/commands/cp for more info storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for dag in dags: # Remove path to temp dir dag = dag.replace(f""{temp_dir}/"", ""dags/"") # Upload to your bucket blob = bucket.blob(dag) blob.upload_from_filename(dag) print(f""File {dag} uploaded to {bucket_name}/{dag}."") else: print(""No DAGs to upload."") " 52459,"def test_spans_span_sent(doc, doc_not_parsed): """"""Test span.sent property"""""" assert len(list(doc.sents)) assert doc[:2].sent.root.text == ""is"" assert doc[:2].sent.text == ""This is a sentence."" assert doc[6:7].sent.root.left_edge.text == ""This"" assert doc[0: len(doc)].sent == list(doc.sents)[0] with pytest.raises(ValueError) : doc_not_parsed[:2].sent # test on manual sbd doc_not_parsed[0].is_sent_start = True doc_not_parsed[5].is_sent_start = True assert doc_not_parsed[1:3].sent == doc_not_parsed[0:5] assert doc_not_parsed[10:14].sent == doc_not_parsed[5:] ","def test_spans_span_sent(doc, doc_not_parsed): """"""Test span.sent property"""""" assert len(list(doc.sents)) assert doc[:2].sent.root.text == ""is"" assert doc[:2].sent.text == ""This is a sentence."" assert doc[6:7].sent.root.left_edge.text == ""This"" assert doc[0 : len(doc)].sent == list(doc.sents)[0] assert list(doc[0 : len(doc)].sents) == list(doc.sents) with pytest.raises(ValueError) : doc_not_parsed[:2].sent # test on manual sbd doc_not_parsed[0].is_sent_start = True doc_not_parsed[5].is_sent_start = True assert doc_not_parsed[1:3].sent == doc_not_parsed[0:5] assert doc_not_parsed[10:14].sent == doc_not_parsed[5:] " 43382,"def CVNeuralNetLayer(theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires=None): """"""pennylane.template.CVNeuralNetLayer(theta_1, phi_1, s, theta_2, phi_2, r, k, wires) A single layer of a CV Quantum Neural Network Implements a single layer from the the CV Quantum Neural Network (CVQNN) architecture of :cite:`killoran2018continuous` over :math:`N` wires. .. note:: The CV neural network architecture includes :class:`~.Kerr` operations. Make sure to use a suitable device, such as the :code:`strawberryfields.fock` device of the `PennyLane-SF `_ plugin. 
Args: theta_1 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for first interferometer phi_1 (array[float]): length :math:`N(N-1)/2` array of phase angles for first interferometer varphi_1 (array[float]): length :math:`N` array of rotation angles for first interferometer r (array[float]): length :math:`N` arrays of squeezing amounts for :class:`~.Squeezing` operations phi_r (array[float]): length :math:`N` arrays of squeezing angles for :class:`~.Squeezing` operations theta_2 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for second interferometer phi_2 (array[float]): length :math:`N(N-1)/2` array of phase angles for second interferometer varphi_2 (array[float]): length :math:`N` array of rotation angles for second interferometer a (array[float]): length :math:`N` arrays of displacement magnitudes for :class:`~.Displacement` operations phi_a (array[float]): length :math:`N` arrays of displacement angles for :class:`~.Displacement` operations k (array[float]): length :math:`N` arrays of kerr parameters for :class:`~.Kerr` operations Keyword Args: wires (Sequence[int]): wires the layer should act on """""" Interferometer(theta=theta_1, phi=phi_1, varphi=varphi_1, wires=wires) for i, wire in enumerate(wires): Squeezing(r[i], phi_r[i], wires=wire) Interferometer(theta=theta_2, phi=phi_2, varphi=varphi_2, wires=wires) for i, wire in enumerate(wires): Displacement(a[i], phi_a[i], wires=wire) for i, wire in enumerate(wires): Kerr(k[i], wires=wire) ","def CVNeuralNetLayer(theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires=None): """"""pennylane.template.CVNeuralNetLayer(theta_1, phi_1, s, theta_2, phi_2, r, k, wires) A single layer of a CV Quantum Neural Network Implements a single layer from the the CV Quantum Neural Network (CVQNN) architecture of :cite:`killoran2018continuous` over :math:`N` wires. .. note:: The CV neural network architecture includes :class:`~.Kerr` operations. Make sure to use a suitable device, such as the :code:`strawberryfields.fock` device of the `PennyLane-SF `_ plugin. 
Args: theta_1 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for first interferometer phi_1 (array[float]): length :math:`N(N-1)/2` array of phase angles for first interferometer varphi_1 (array[float]): length :math:`N` array of rotation angles for first interferometer r (array[float]): length :math:`N` arrays of squeezing amounts for :class:`~.Squeezing` operations phi_r (array[float]): length :math:`N` arrays of squeezing angles for :class:`~.Squeezing` operations theta_2 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for second interferometer phi_2 (array[float]): length :math:`N(N-1)/2` array of phase angles for second interferometer varphi_2 (array[float]): length :math:`N` array of final rotation angles for second interferometer a (array[float]): length :math:`N` arrays of displacement magnitudes for :class:`~.Displacement` operations phi_a (array[float]): length :math:`N` arrays of displacement angles for :class:`~.Displacement` operations k (array[float]): length :math:`N` arrays of kerr parameters for :class:`~.Kerr` operations Keyword Args: wires (Sequence[int]): wires the layer should act on """""" Interferometer(theta=theta_1, phi=phi_1, varphi=varphi_1, wires=wires) for i, wire in enumerate(wires): Squeezing(r[i], phi_r[i], wires=wire) Interferometer(theta=theta_2, phi=phi_2, varphi=varphi_2, wires=wires) for i, wire in enumerate(wires): Displacement(a[i], phi_a[i], wires=wire) for i, wire in enumerate(wires): Kerr(k[i], wires=wire) " 42134,"def octobot_parser(parser): parser.add_argument('-v', '--version', help='Show OctoBot current version.', action='store_true') parser.add_argument('-s', '--simulate', help='Force OctoBot to start with the trader simulator only.', action='store_true') parser.add_argument('-rts', '--reset-trading-history', help='Force the traders to reset their history. They will ' 'now take the next portfolio as a reference for ' 'profitability and trading simulators will use a ' 'fresh new portfolio.', action='store_true') parser.add_argument('-b', '--backtesting', help='Start OctoBot in backesting mode using the backtesting ' 'config stored in config.json.', action='store_true') parser.add_argument('-bf', '--backtesting-files', type=str, nargs='+', help='Backtesting files to use (should be provided with -b or --backtesting).', required=False) parser.add_argument('-wdr', '--whole-data-range', help='On multiple files backtesting: run on the whole available data instead of the ' 'common part only (default behavior).', action='store_true') parser.add_argument('-dbt', '--disable-backtesting-timeout', help='To use when running long backtesting : disable backtesting watcher timeout ' 'that prevent backtesting from being stuck during execution.', action='store_false') parser.add_argument('-r', '--risk', type=float, help='Force a specific risk configuration (between 0 and 1).') parser.add_argument('-nw', '--no_web', help=""Don't start OctoBot web interface."", action='store_true') parser.add_argument('-nt', '--no-telegram', help='Start OctoBot without telegram interface, even if telegram ' 'credentials are in config. With this parameter, your Octobot ' 'won`t reply to any telegram command but is still able to listen ' 'to telegram feed and send telegram notifications', action='store_true') parser.add_argument('--encrypter', help=""Start the exchange api keys encrypter. 
This tool is useful to manually add"" "" exchanges configuration in your config.json without using any interface "" ""(ie the web interface that handle encryption automatically)."", action='store_true') parser.add_argument('--identifier', help=""OctoBot community identifier."", type=str, nargs=1) parser.add_argument('-o', '--strategy_optimizer', help='Start Octobot strategy optimizer. This mode will make ' 'octobot play backtesting scenarii located in ' 'abstract_strategy_test.py with different timeframes, ' 'evaluators and risk using the trading mode set in ' 'config.json. This tool is useful to quickly test a ' 'strategy and automatically find the best compatible ' 'settings. Param is the name of the strategy class to ' 'test. Example: -o TechnicalAnalysisStrategyEvaluator' ' Warning: this process may take a long time.', nargs='+') parser.set_defaults(func=start_octobot) # add sub commands subparsers = parser.add_subparsers(title=""Other commands"") # tentacles manager tentacles_parser = subparsers.add_parser(""tentacles"", help='Calls OctoBot tentacles manager.\n' 'Use ""tentacles --help"" to get the ' 'tentacles manager help.') tentacles_manager_cli.register_tentacles_manager_arguments(tentacles_parser) tentacles_parser.set_defaults(func=commands.call_tentacles_manager) ","def octobot_parser(parser): parser.add_argument('-v', '--version', help='Show OctoBot current version.', action='store_true') parser.add_argument('-s', '--simulate', help='Force OctoBot to start with the trader simulator only.', action='store_true') parser.add_argument('-rts', '--reset-trading-history', help='Force the traders to reset their history. They will ' 'now take the next portfolio as a reference for ' 'profitability and trading simulators will use a ' 'fresh new portfolio.', action='store_true') parser.add_argument('-b', '--backtesting', help='Start OctoBot in backesting mode using the backtesting ' 'config stored in config.json.', action='store_true') parser.add_argument('-bf', '--backtesting-files', type=str, nargs='+', help='Backtesting files to use (should be provided with -b or --backtesting).', required=False) parser.add_argument('-wdr', '--whole-data-range', help='On multiple files backtesting: run on the whole available data instead of the ' 'common part only (default behavior).', action='store_true') parser.add_argument('-dbt', '--disable-backtesting-timeout', help='To use when running long backtesting : disable backtesting watcher timeout ' 'that prevent backtesting from being interrupted during execution.', action='store_false') parser.add_argument('-r', '--risk', type=float, help='Force a specific risk configuration (between 0 and 1).') parser.add_argument('-nw', '--no_web', help=""Don't start OctoBot web interface."", action='store_true') parser.add_argument('-nt', '--no-telegram', help='Start OctoBot without telegram interface, even if telegram ' 'credentials are in config. With this parameter, your Octobot ' 'won`t reply to any telegram command but is still able to listen ' 'to telegram feed and send telegram notifications', action='store_true') parser.add_argument('--encrypter', help=""Start the exchange api keys encrypter. 
This tool is useful to manually add"" "" exchanges configuration in your config.json without using any interface "" ""(ie the web interface that handle encryption automatically)."", action='store_true') parser.add_argument('--identifier', help=""OctoBot community identifier."", type=str, nargs=1) parser.add_argument('-o', '--strategy_optimizer', help='Start Octobot strategy optimizer. This mode will make ' 'octobot play backtesting scenarii located in ' 'abstract_strategy_test.py with different timeframes, ' 'evaluators and risk using the trading mode set in ' 'config.json. This tool is useful to quickly test a ' 'strategy and automatically find the best compatible ' 'settings. Param is the name of the strategy class to ' 'test. Example: -o TechnicalAnalysisStrategyEvaluator' ' Warning: this process may take a long time.', nargs='+') parser.set_defaults(func=start_octobot) # add sub commands subparsers = parser.add_subparsers(title=""Other commands"") # tentacles manager tentacles_parser = subparsers.add_parser(""tentacles"", help='Calls OctoBot tentacles manager.\n' 'Use ""tentacles --help"" to get the ' 'tentacles manager help.') tentacles_manager_cli.register_tentacles_manager_arguments(tentacles_parser) tentacles_parser.set_defaults(func=commands.call_tentacles_manager) " 56640,"def normalize_ddc(ddc): """""" :param str ddc: :rtype: list of str """""" ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace(""'"", '') results = [] for match in DDC_RE.finditer(ddc): parts = match.groupdict() prefix = '' suffix = '' # DDCs should start at word boundaries start = match.start() if start > 0 and re.search(r'\b', ddc[start - 1]): continue # And end at them end = match.end() if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]): continue # Some old standard which isn't used anymore; might need to filter these # out, but they should sort OK so let's keep them. if parts['neg']: prefix += '-' # Juvenile prefix if parts['j']: prefix += 'j' # Star should be at end if parts['prestar'] or parts['poststar']: suffix = '*' # Series suffix if parts['s']: suffix += ' s' # Biographical if parts['B']: suffix += ' B' # Not at all sure if parts['ninetwo']: suffix += parts['ninetwo'] # And now the actual number! if parts['number']: # Numbers in parenthesis are ""series"" numbers end = match.end('number') if end < len(ddc) and ddc[end] == ')': suffix += ' s' # pad the integer part of the number number_parts = parts['number'].split('.') integer = number_parts[0] # Copy decimal without losing precision decimal = '.' + number_parts[1] if len(number_parts) > 1 else '' number = '%03d%s' % (int(integer), decimal) # Handle [Fic] or [E] elif parts['fic']: number = '[%s]' % parts['fic'].title() else: continue results.append(prefix + number + suffix) return results ","def normalize_ddc(ddc): """""" :param str ddc: :rtype: list of str """""" ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace(""'"", '') results = [] for match in DDC_RE.finditer(ddc): parts = match.groupdict() prefix = '' suffix = '' # DDCs should start at word boundaries start = match.start() if start > 0 and re.search(r'\b', ddc[start - 1]): continue # And end at them end = match.end() if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]): continue # Some old standard which isn't used anymore; might need to filter these # out, but they should sort OK so let's keep them. 
if parts['neg']: prefix += '-' # Juvenile prefix if parts['j']: prefix += 'j' # Star should be at end if parts['prestar'] or parts['poststar']: suffix = '*' # Series suffix if parts['s']: suffix += ' s' # Biographical if parts['B']: suffix += ' B' # Not at all sure if parts['ninetwo']: suffix += parts['ninetwo'] # And now the actual number! if parts['number']: # Numbers in parenthesis are ""series"" numbers end = match.end('number') if end < len(ddc) and ddc[end] == ')': suffix += ' s' # pad the integer part of the number number_parts = parts['number'].split('.') integer = number_parts[0] # Copy decimal without losing precision decimal = '.' + number_parts[1] if len(number_parts) > 1 else '' number = '%03d%s' % (int(integer), decimal) # Handle [Fic] or [E] elif parts['fic']: number = '[%s]' % parts['fic'].title() else: return '' return prefix + number + suffix return results " 23005,"def test_nbytes_auto(): chunks = normalize_chunks(""800B"", shape=(500,), dtype='float64') assert chunks == ((100, 100, 100, 100, 100),) chunks = normalize_chunks(""200B"", shape=(10,10), dtype='float64') assert chunks == ((5, 5), (5, 5)) chunks = normalize_chunks(""33B"", shape=(10,10), dtype='float64') assert chunks == ((2, 2, 2, 2, 2), (2, 2, 2, 2, 2)) chunks = normalize_chunks(""1800B"", shape=(10, 20, 30), dtype='float64') assert chunks == ((5, 5), (5, 5, 5, 5), (6, 6, 6, 6, 6)) with pytest.raises(ValueError): normalize_chunks((""10B""), shape=(10,), limit=5, dtype='float64') with pytest.raises(ValueError): normalize_chunks((""100B"", ""10B""), shape=(10,10), dtype='float64') with pytest.raises(ValueError): normalize_chunks((""100B"", ""10B""), shape=(10,10), limit=20, dtype='float64') ","def test_nbytes_auto(): chunks = normalize_chunks(""800B"", shape=(500,), dtype='float64') assert chunks == ((100, 100, 100, 100, 100),) chunks = normalize_chunks(""200B"", shape=(10,10), dtype='float64') assert chunks == ((5, 5), (5, 5)) chunks = normalize_chunks(""33B"", shape=(10,10), dtype='float64') assert chunks == ((2, 2, 2, 2, 2), (2, 2, 2, 2, 2)) chunks = normalize_chunks(""1800B"", shape=(10, 20, 30), dtype='float64') assert chunks == ((5, 5), (5, 5, 5, 5), (6, 6, 6, 6, 6)) with pytest.raises(ValueError): normalize_chunks((""10B""), shape=(10,), limit=5, dtype='float64') with pytest.raises(ValueError): normalize_chunks((""100B"", ""10B""), shape=(10,10), dtype='float64') with pytest.raises(ValueError): normalize_chunks(""100B"", shape=(10,10), limit=20, dtype='float64') " 40727,"def test_num_classes_wrong_input(): with pytest.raises(ValueError, match=""Argument num_classes needs to be >1""): ConfusionMatrix(num_classes=1) ","def test_num_classes_wrong_input(): with pytest.raises(ValueError, match=""Argument num_classes needs to be > 1""): ConfusionMatrix(num_classes=1) " 58152,"def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" # get the service API url base_url = demisto.params()['url'].strip('/') verify_certificate = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) client_id = demisto.params().get('credentials').get('identifier') client_secret = demisto.params().get('credentials').get('password') oauth_url = demisto.params().get('oauth_url') default_tsg_id = demisto.params().get('tsg_id') LOG(f'Command being called is {demisto.command()}') commands = { 'test-module': test_module, 'prisma-access-create-security-rule': create_security_rule_command, 'prisma-access-list-security-rules': list_security_rules_command, 'prisma-access-push-candidate-config': 
push_candidate_config_command, 'prisma-access-get-config-jobs-by-id': get_config_jobs_by_id_command, 'prisma-access-list-config-jobs': list_config_jobs_command, 'prisma-access-update-security-rule': update_security_rule_command, 'prisma-access-get-security-rule-by-name': get_security_rule_by_name_command, 'prisma-access-query-agg-monitor-api': query_agg_monitor_api_command, 'prisma-access-delete-security-rule': delete_security_rule_command, 'prisma-access-create-address-object': create_address_object_command, 'prisma-access-edit-address-object': edit_address_object_command, 'prisma-access-delete-address-object': delete_address_object_command, 'prisma-access-list-address-objects': list_address_objects_command } command = demisto.command() client = Client( base_url=base_url, client_id=client_id, client_secret=client_secret, oauth_url=oauth_url, verify=verify_certificate, headers={ 'Accept': 'application/json', 'Content-Type': 'application/json' }, proxy=proxy, ok_codes=(200, 201, 204)) try: if command in commands: return_results(commands[command](client, demisto.args(), default_tsg_id)) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') # Log exceptions except Exception as e: return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}') ","def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" # get the service API url base_url = demisto.params()['url'].strip('/') verify_certificate = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) client_id = demisto.params().get('credentials').get('identifier') client_secret = demisto.params().get('credentials').get('password') oauth_url = demisto.params().get('oauth_url') default_tsg_id = demisto.params().get('tsg_id') command = demisto.command() demisto.debug(f'Command being called is {command}') commands = { 'test-module': test_module, 'prisma-access-create-security-rule': create_security_rule_command, 'prisma-access-list-security-rules': list_security_rules_command, 'prisma-access-push-candidate-config': push_candidate_config_command, 'prisma-access-get-config-jobs-by-id': get_config_jobs_by_id_command, 'prisma-access-list-config-jobs': list_config_jobs_command, 'prisma-access-update-security-rule': update_security_rule_command, 'prisma-access-get-security-rule-by-name': get_security_rule_by_name_command, 'prisma-access-query-agg-monitor-api': query_agg_monitor_api_command, 'prisma-access-delete-security-rule': delete_security_rule_command, 'prisma-access-create-address-object': create_address_object_command, 'prisma-access-edit-address-object': edit_address_object_command, 'prisma-access-delete-address-object': delete_address_object_command, 'prisma-access-list-address-objects': list_address_objects_command } command = demisto.command() client = Client( base_url=base_url, client_id=client_id, client_secret=client_secret, oauth_url=oauth_url, verify=verify_certificate, headers={ 'Accept': 'application/json', 'Content-Type': 'application/json' }, proxy=proxy, ok_codes=(200, 201, 204)) try: if command in commands: return_results(commands[command](client, demisto.args(), default_tsg_id)) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') # Log exceptions except Exception as e: return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}') " 28128,"def parse_number_from_string(var: str) -> Union[int, float]: """"""Tries to parse a number (int or float) from the given string. 
If no easy conversion could be made, raise a ValueError."""""" try: return int(var) except ValueError: pass try: return float(var) except ValueError: pass raise ValueError(f""could not convert string to int() with base 10 or float: {var}"") ","def parse_number_from_string(var: str) -> Union[int, float]: """"""Tries to parse a number (int or float) from the given string. If no easy conversion could be made, raise a ValueError."""""" try: return int(var) except ValueError: pass try: return float(var) except ValueError: pass raise ValueError(f""could not convert string to number using int() or float(): {var}"") " 33890,"def get_job_submission_client_cluster_info( address: str, create_cluster_if_needed: bool) -> ClusterInfo: """"""Get address, cookies, and metadata used for ray.dashboard.modules.job.sdk.JobSubmissionClient. Args: address (str): Address without the module prefix that is passed to ray.dashboard.modules.job.sdk.JobSubmissionClient. create_cluster_if_needed (bool): Indicates whether the cluster of the address returned needs to be running. Ray doesn't start a cluster before interacting with jobs, but other implementations may do so. Returns: ClusterInfo object consisting of address, cookies, and metadata for JobSubmissionClient to use. """""" return ClusterInfo(""http://"" + address, None, {}) ","def get_job_submission_client_cluster_info( address: str, create_cluster_if_needed: bool) -> ClusterInfo: """"""Get address, cookies, and metadata used for ray.dashboard.modules.job.sdk.JobSubmissionClient. Args: address (str): Address without the module prefix that is passed to JobSubmissionClient. create_cluster_if_needed (bool): Indicates whether the cluster of the address returned needs to be running. Ray doesn't start a cluster before interacting with jobs, but other implementations may do so. Returns: ClusterInfo object consisting of address, cookies, and metadata for JobSubmissionClient to use. """""" return ClusterInfo(""http://"" + address, None, {}) " 363,"def Data( name: str, value, *, dims: Optional[Sequence[str]] = None, export_index_as_coords=False, mutable: Optional[bool] = None, **kwargs, ) -> Union[SharedVariable, TensorConstant]: """"""Data container that registers a data variable with the model. Depending on the ``mutable`` setting (default: True), the variable is registered as a :class:`~aesara.compile.sharedvalue.SharedVariable`, enabling it to be altered in value and shape, but NOT in dimensionality using :func:`pymc.set_data`. To set the value of the data container variable, check out :func:`pymc.Model.set_data`. For more information, take a look at this example notebook https://docs.pymc.io/projects/examples/en/latest/pymc3_howto/data_container.html Parameters ---------- name : str The name for this variable. value : list or ndarray or pd.Series, pd.Dataframe A value to associate with this variable. dims : str or tuple of str, optional Dimension names of the random variables (as opposed to the shapes of these random variables). Use this when ``value`` is a pandas Series or DataFrame. The ``dims`` will then be the name of the Series / DataFrame's columns. See ArviZ documentation for more information about dimensions and coordinates: :ref:`arviz:quickstart`. If this parameter is not specified, the random variables will not have dimension names. export_index_as_coords : bool, default=False If True, the ``Data`` container will try to infer what the coordinates should be if there is an index in ``value``. 
mutable : bool, optional Switches between creating a :class:`~aesara.compile.sharedvalue.SharedVariable` (``mutable=True``) vs. creating a :class:`~aesara.tensor.TensorConstant` (``mutable=False``). Consider using :class:`pymc.ConstantData` or :class:`pymc.MutableData` as less verbose alternatives to ``pm.Data(..., mutable=...)``. If this parameter is not specified, the value it takes will depend on the version of the package. Since ``v4.1.0`` the default value is ``mutable=False``, with previous versions having ``mutable=True``. **kwargs : dict, optional Extra arguments passed to :func:`aesara.shared`. Examples -------- >>> import pymc as pm >>> import numpy as np >>> # We generate 10 datasets >>> true_mu = [np.random.randn() for _ in range(10)] >>> observed_data = [mu + np.random.randn(20) for mu in true_mu] >>> with pm.Model() as model: ... data = pm.MutableData('data', observed_data[0]) ... mu = pm.Normal('mu', 0, 10) ... pm.Normal('y', mu=mu, sigma=1, observed=data) >>> # Generate one trace for each dataset >>> idatas = [] >>> for data_vals in observed_data: ... with model: ... # Switch out the observed dataset ... model.set_data('data', data_vals) ... idatas.append(pm.sample()) """""" if isinstance(value, list): value = np.array(value) # Add data container to the named variables of the model. try: model = pm.Model.get_context() except TypeError: raise TypeError( ""No model on context stack, which is needed to instantiate a data container. "" ""Add variable inside a 'with model:' block."" ) name = model.name_for(name) # `pandas_to_array` takes care of parameter `value` and # transforms it to something digestible for Aesara. arr = pandas_to_array(value) if mutable is None: major, minor = (int(v) for v in pm.__version__.split(""."")[:2]) mutable = major == 4 and minor < 1 if mutable: warnings.warn( ""The `mutable` kwarg was not specified. Currently it defaults to `pm.Data(mutable=True)`,"" "" which is equivalent to using `pm.MutableData()`."" "" In v4.1.0 the default will change to `pm.Data(mutable=False)`, equivalent to `pm.ConstantData`."" "" Set `pm.Data(..., mutable=False/True)`, or use `pm.ConstantData`/`pm.MutableData`."", FutureWarning, ) if mutable: x = aesara.shared(arr, name, **kwargs) else: x = at.as_tensor_variable(arr, name, **kwargs) if isinstance(dims, str): dims = (dims,) if not (dims is None or len(dims) == x.ndim): raise pm.exceptions.ShapeError( ""Length of `dims` must match the dimensions of the dataset."", actual=len(dims), expected=x.ndim, ) coords = determine_coords(model, value, dims) if export_index_as_coords: model.add_coords(coords) elif dims: # Register new dimension lengths for d, dname in enumerate(dims): if not dname in model.dim_lengths: model.add_coord(dname, values=None, length=x.shape[d]) model.add_random_variable(x, dims=dims) return x ","def Data( name: str, value, *, dims: Optional[Sequence[str]] = None, export_index_as_coords=False, mutable: Optional[bool] = None, **kwargs, ) -> Union[SharedVariable, TensorConstant]: """"""Data container that registers a data variable with the model. Depending on the ``mutable`` setting (default: True), the variable is registered as a :class:`~aesara.compile.sharedvalue.SharedVariable`, enabling it to be altered in value and shape, but NOT in dimensionality using :func:`pymc.set_data`. To set the value of the data container variable, check out :func:`pymc.Model.set_data`. 
For more information, take a look at this example notebook https://docs.pymc.io/projects/examples/en/latest/pymc3_howto/data_container.html Parameters ---------- name : str The name for this variable. value : array_like or pd.Series, pd.Dataframe A value to associate with this variable. dims : str or tuple of str, optional Dimension names of the random variables (as opposed to the shapes of these random variables). Use this when ``value`` is a pandas Series or DataFrame. The ``dims`` will then be the name of the Series / DataFrame's columns. See ArviZ documentation for more information about dimensions and coordinates: :ref:`arviz:quickstart`. If this parameter is not specified, the random variables will not have dimension names. export_index_as_coords : bool, default=False If True, the ``Data`` container will try to infer what the coordinates should be if there is an index in ``value``. mutable : bool, optional Switches between creating a :class:`~aesara.compile.sharedvalue.SharedVariable` (``mutable=True``) vs. creating a :class:`~aesara.tensor.TensorConstant` (``mutable=False``). Consider using :class:`pymc.ConstantData` or :class:`pymc.MutableData` as less verbose alternatives to ``pm.Data(..., mutable=...)``. If this parameter is not specified, the value it takes will depend on the version of the package. Since ``v4.1.0`` the default value is ``mutable=False``, with previous versions having ``mutable=True``. **kwargs : dict, optional Extra arguments passed to :func:`aesara.shared`. Examples -------- >>> import pymc as pm >>> import numpy as np >>> # We generate 10 datasets >>> true_mu = [np.random.randn() for _ in range(10)] >>> observed_data = [mu + np.random.randn(20) for mu in true_mu] >>> with pm.Model() as model: ... data = pm.MutableData('data', observed_data[0]) ... mu = pm.Normal('mu', 0, 10) ... pm.Normal('y', mu=mu, sigma=1, observed=data) >>> # Generate one trace for each dataset >>> idatas = [] >>> for data_vals in observed_data: ... with model: ... # Switch out the observed dataset ... model.set_data('data', data_vals) ... idatas.append(pm.sample()) """""" if isinstance(value, list): value = np.array(value) # Add data container to the named variables of the model. try: model = pm.Model.get_context() except TypeError: raise TypeError( ""No model on context stack, which is needed to instantiate a data container. "" ""Add variable inside a 'with model:' block."" ) name = model.name_for(name) # `pandas_to_array` takes care of parameter `value` and # transforms it to something digestible for Aesara. arr = pandas_to_array(value) if mutable is None: major, minor = (int(v) for v in pm.__version__.split(""."")[:2]) mutable = major == 4 and minor < 1 if mutable: warnings.warn( ""The `mutable` kwarg was not specified. 
Currently it defaults to `pm.Data(mutable=True)`,"" "" which is equivalent to using `pm.MutableData()`."" "" In v4.1.0 the default will change to `pm.Data(mutable=False)`, equivalent to `pm.ConstantData`."" "" Set `pm.Data(..., mutable=False/True)`, or use `pm.ConstantData`/`pm.MutableData`."", FutureWarning, ) if mutable: x = aesara.shared(arr, name, **kwargs) else: x = at.as_tensor_variable(arr, name, **kwargs) if isinstance(dims, str): dims = (dims,) if not (dims is None or len(dims) == x.ndim): raise pm.exceptions.ShapeError( ""Length of `dims` must match the dimensions of the dataset."", actual=len(dims), expected=x.ndim, ) coords = determine_coords(model, value, dims) if export_index_as_coords: model.add_coords(coords) elif dims: # Register new dimension lengths for d, dname in enumerate(dims): if not dname in model.dim_lengths: model.add_coord(dname, values=None, length=x.shape[d]) model.add_random_variable(x, dims=dims) return x " 48760,"def upgrade(): # noqa: D103 if context.config.get_main_option('sqlalchemy.url').startswith('mysql'): op.alter_column(table_name='dag_code', column_name='source_code', type_=mysql.MEDIUMTEXT) ","def upgrade(): # noqa: D103 conn = op.get_bind() # pylint: disable=no-member if conn.dialect.name == ""mysql"": op.alter_column(table_name='dag_code', column_name='source_code', type_=mysql.MEDIUMTEXT) " 34320,"def validate_requires_one_of( properties: Tuple[Text], provided_properties: Set[Text], component_name: Text ): """"""Validates that at least one of the given properties is present in the provided properties."""""" property_present = False for property in properties: if property in provided_properties: property_present = True break if not property_present: raise Exception( f""Failed to validate component {component_name}. "" f""Missing one of the following properties: "" f""{properties}."" ) ","def validate_requires_one_of( required_properties: Tuple[Text], provided_properties: Set[Text], component_name: Text ): """"""Validates that at least one of the given properties is present in the provided properties."""""" property_present = False for property in properties: if property in provided_properties: property_present = True break if not property_present: raise Exception( f""Failed to validate component {component_name}. "" f""Missing one of the following properties: "" f""{properties}."" ) " 35498,"def update_v_cruise(v_cruise_kph, buttonEvents, button_timers, v_ego, gas_pressed, enabled, metric): # handle button presses. TODO: this should be in state_control, but a decelCruise press # would have the effect of both enabling and changing speed is checked after the state transition if not enabled: return v_cruise_kph long_press = False button_type = None # should be CV.MPH_TO_KPH, but this causes rounding errors v_cruise_delta = 1. 
if metric else 1.6 for b in buttonEvents: if b.type.raw in button_timers and not b.pressed: if button_timers[b.type.raw] > CRUISE_LONG_PRESS: return v_cruise_kph # end long press button_type = b.type.raw break else: for k in button_timers.keys(): if button_timers[k] and button_timers[k] % CRUISE_LONG_PRESS == 0: button_type = k long_press = True break # If set is pressed while overriding, reset cruise speed to vEgo if enabled and gas_pressed and button_type == car.CarState.ButtonEvent.Type.decelCruise: return int(round(clip(v_ego * CV.MS_TO_KPH, V_CRUISE_ENABLE_MIN, V_CRUISE_MAX))) if button_type: v_cruise_delta = v_cruise_delta * (5 if long_press else 1) if long_press and v_cruise_kph % v_cruise_delta != 0: # partial interval v_cruise_kph = CRUISE_NEAREST_FUNC[button_type](v_cruise_kph / v_cruise_delta) * v_cruise_delta else: v_cruise_kph += v_cruise_delta * CRUISE_INTERVAL_SIGN[button_type] v_cruise_kph = clip(round(v_cruise_kph, 1), V_CRUISE_MIN, V_CRUISE_MAX) return v_cruise_kph ","def update_v_cruise(v_cruise_kph, buttonEvents, button_timers, v_ego, gas_pressed, enabled, metric): # handle button presses. TODO: this should be in state_control, but a decelCruise press # would have the effect of both enabling and changing speed is checked after the state transition if not enabled: return v_cruise_kph long_press = False button_type = None # should be CV.MPH_TO_KPH, but this causes rounding errors v_cruise_delta = 1. if metric else 1.6 for b in buttonEvents: if b.type.raw in button_timers and not b.pressed: if button_timers[b.type.raw] > CRUISE_LONG_PRESS: return v_cruise_kph # end long press button_type = b.type.raw break else: for k in button_timers.keys(): if button_timers[k] and button_timers[k] % CRUISE_LONG_PRESS == 0: button_type = k long_press = True break # If set is pressed while overriding, reset cruise speed to vEgo if gas_pressed and button_type == car.CarState.ButtonEvent.Type.decelCruise: return int(round(clip(v_ego * CV.MS_TO_KPH, V_CRUISE_ENABLE_MIN, V_CRUISE_MAX))) if button_type: v_cruise_delta = v_cruise_delta * (5 if long_press else 1) if long_press and v_cruise_kph % v_cruise_delta != 0: # partial interval v_cruise_kph = CRUISE_NEAREST_FUNC[button_type](v_cruise_kph / v_cruise_delta) * v_cruise_delta else: v_cruise_kph += v_cruise_delta * CRUISE_INTERVAL_SIGN[button_type] v_cruise_kph = clip(round(v_cruise_kph, 1), V_CRUISE_MIN, V_CRUISE_MAX) return v_cruise_kph " 4548,"def run_glm(Y, X, noise_model='ar1', bins=100, n_jobs=1, verbose=0): """""" GLM fit for an fMRI data matrix Parameters ---------- Y : array of shape (n_time_points, n_voxels) The fMRI data. X : array of shape (n_time_points, n_regressors) The design matrix. noise_model : {'ar1', 'ols'}, optional The temporal variance model. Defaults to 'ar1'. bins : int, optional Maximum number of discrete bins for the AR(1) coef histogram. n_jobs : int, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : int, optional The verbosity level. Defaut is 0 Returns ------- labels : array of shape (n_voxels,), A map of values on voxels used to identify the corresponding model. results : dict, Keys correspond to the different labels values values are RegressionResults instances corresponding to the voxels. """""" acceptable_noise_models = ['ols'] if not (('ar' in noise_model) or (noise_model in acceptable_noise_models)): raise ValueError( ""Acceptable noise models are {0}. 
You provided "" ""'noise_model={1}'"".format(acceptable_noise_models, noise_model) ) if Y.shape[0] != X.shape[0]: raise ValueError('The number of rows of Y ' 'should match the number of rows of X.' ' You provided X with shape {0} ' 'and Y with shape {1}'. format(X.shape, Y.shape)) # Create the model ols_result = OLSModel(X).fit(Y) if 'ar' in noise_model: ar_order = int(re.split('ar', noise_model)[1]) # compute and discretize the AR1 coefs ar1 = [_yule_walker(ols_result.residuals[:, res].reshape(-1, 1).T, order=ar_order) for res in range(ols_result.residuals.shape[1])] if len(ar1[0]) == 1: ar1 = np.asarray(ar1).ravel() del ols_result for idx in range(len(ar1)): ar1[idx] = (ar1[idx] * bins).astype(np.int) * 1. / bins # Fit the AR model acccording to current AR(N) estimates results = {} labels = ar1 # Parallelize by creating a job per ARModel if type(ar1[0]) is np.float64: vals = np.unique(ar1) else: vals = ar1 ar_result = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_ar_model_fit)(X, val, Y[:, [np.all(lab == val) for lab in labels]]) for val in vals) if type(ar1[0]) is np.float64: for val, result in zip(vals, ar_result): results[val] = result else: labels = ['_'.join([str(v) for v in val]) for val in vals] for val, result in zip(vals, ar_result): results['_'.join([str(v) for v in val])] = result del vals del ar_result else: labels = np.zeros(Y.shape[1]) results = {0.0: ols_result} return labels, results ","def run_glm(Y, X, noise_model='ar1', bins=100, n_jobs=1, verbose=0): """""" GLM fit for an fMRI data matrix Parameters ---------- Y : array of shape (n_time_points, n_voxels) The fMRI data. X : array of shape (n_time_points, n_regressors) The design matrix. noise_model : {'ar1', 'ols'}, optional The temporal variance model. Defaults to 'ar1'. bins : int, optional Maximum number of discrete bins for the AR(1) coef histogram. n_jobs : int, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : int, optional The verbosity level. Defaut is 0 Returns ------- labels : array of shape (n_voxels,), A map of values on voxels used to identify the corresponding model. results : dict, Keys correspond to the different labels values values are RegressionResults instances corresponding to the voxels. """""" acceptable_noise_models = ['ols'] if (('ar' not in noise_model) and (noise_model is not 'ols')): raise ValueError( ""Acceptable noise models are {0}. You provided "" ""'noise_model={1}'"".format(acceptable_noise_models, noise_model) ) if Y.shape[0] != X.shape[0]: raise ValueError('The number of rows of Y ' 'should match the number of rows of X.' ' You provided X with shape {0} ' 'and Y with shape {1}'. format(X.shape, Y.shape)) # Create the model ols_result = OLSModel(X).fit(Y) if 'ar' in noise_model: ar_order = int(re.split('ar', noise_model)[1]) # compute and discretize the AR1 coefs ar1 = [_yule_walker(ols_result.residuals[:, res].reshape(-1, 1).T, order=ar_order) for res in range(ols_result.residuals.shape[1])] if len(ar1[0]) == 1: ar1 = np.asarray(ar1).ravel() del ols_result for idx in range(len(ar1)): ar1[idx] = (ar1[idx] * bins).astype(np.int) * 1. 
/ bins # Fit the AR model acccording to current AR(N) estimates results = {} labels = ar1 # Parallelize by creating a job per ARModel if type(ar1[0]) is np.float64: vals = np.unique(ar1) else: vals = ar1 ar_result = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_ar_model_fit)(X, val, Y[:, [np.all(lab == val) for lab in labels]]) for val in vals) if type(ar1[0]) is np.float64: for val, result in zip(vals, ar_result): results[val] = result else: labels = ['_'.join([str(v) for v in val]) for val in vals] for val, result in zip(vals, ar_result): results['_'.join([str(v) for v in val])] = result del vals del ar_result else: labels = np.zeros(Y.shape[1]) results = {0.0: ols_result} return labels, results " 30028,"def _check_spaces(env: gym.Env): """"""Check that the observation and action spaces are defined and inherit from gym.spaces.Space. Args: env: The environment's observation and action space to check """""" # Helper to link to the code, because gym has no proper documentation gym_spaces = "" cf https://github.com/openai/gym/blob/master/gym/spaces/"" assert hasattr(env, ""observation_space""), ( ""You must specify an observation space (cf gym.spaces)"" + gym_spaces ) assert hasattr(env, ""action_space""), ( ""You must specify an action space (cf gym.spaces)"" + gym_spaces ) assert isinstance(env.observation_space, Space), ( ""The observation space must inherit from gym.spaces"" + gym_spaces ) assert isinstance(env.action_space, Space), ( ""The action space must inherit from gym.spaces"" + gym_spaces ) ","def _check_spaces(env: gym.Env): """"""Check that the observation and action spaces are defined and inherit from :class:`gym.spaces.Space`. Args: env: The environment's observation and action space to check """""" # Helper to link to the code, because gym has no proper documentation gym_spaces = "" cf https://github.com/openai/gym/blob/master/gym/spaces/"" assert hasattr(env, ""observation_space""), ( ""You must specify an observation space (cf gym.spaces)"" + gym_spaces ) assert hasattr(env, ""action_space""), ( ""You must specify an action space (cf gym.spaces)"" + gym_spaces ) assert isinstance(env.observation_space, Space), ( ""The observation space must inherit from gym.spaces"" + gym_spaces ) assert isinstance(env.action_space, Space), ( ""The action space must inherit from gym.spaces"" + gym_spaces ) " 52463,"def fetch_asset( project_path: Path, url: str, dest: Path, checksum: Optional[str] = None ) -> None: """"""Fetch an asset from a given URL or path. If a checksum is provided and a local file exists, it's only re-downloaded if the checksum doesn't match. project_path (Path): Path to project directory. url (str): URL or path to asset. checksum (Optional[str]): Optional expected checksum of local file. RETURNS (Optional[Path]): The path to the fetched asset or None if fetching the asset failed. 
"""""" dest_path = (project_path / dest).resolve() if dest_path.exists(): # If there's already a file, check for checksum if checksum: if checksum == get_checksum(dest_path): msg.good(f""Skipping download with matching checksum: {dest}"") return else: # If there's not a checksum, make sure the file is a possibly valid size if os.path.getsize(dest_path) == 0: os.remove(dest_path) msg.warn(f""Asset exists but with size of 0 bytes, deleting: {dest}"") # We might as well support the user here and create parent directories in # case the asset dir isn't listed as a dir to create in the project.yml if not dest_path.parent.exists(): dest_path.parent.mkdir(parents=True) with working_dir(project_path): url = convert_asset_url(url) try: download_file(url, dest_path) msg.good(f""Downloaded asset {dest}"") except requests.exceptions.RequestException as e: if Path(url).exists() and Path(url).is_file(): # If it's a local file, copy to destination shutil.copy(url, str(dest_path)) msg.good(f""Copied local asset {dest}"") else: msg.fail(f""Download failed: {dest}"", e) if checksum and checksum != get_checksum(dest_path): msg.fail(f""Checksum doesn't match value defined in {PROJECT_FILE}: {dest}"") ","def fetch_asset( project_path: Path, url: str, dest: Path, checksum: Optional[str] = None ) -> None: """"""Fetch an asset from a given URL or path. If a checksum is provided and a local file exists, it's only re-downloaded if the checksum doesn't match. project_path (Path): Path to project directory. url (str): URL or path to asset. checksum (Optional[str]): Optional expected checksum of local file. RETURNS (Optional[Path]): The path to the fetched asset or None if fetching the asset failed. """""" dest_path = (project_path / dest).resolve() if dest_path.exists(): # If there's already a file, check for checksum if checksum: if checksum == get_checksum(dest_path): msg.good(f""Skipping download with matching checksum: {dest}"") return else: # If there's not a checksum, make sure the file is a possibly valid size if os.path.getsize(dest_path) == 0: msg.warn(f""Asset exists but with size of 0 bytes, deleting: {dest}"") os.remove(dest_path) # We might as well support the user here and create parent directories in # case the asset dir isn't listed as a dir to create in the project.yml if not dest_path.parent.exists(): dest_path.parent.mkdir(parents=True) with working_dir(project_path): url = convert_asset_url(url) try: download_file(url, dest_path) msg.good(f""Downloaded asset {dest}"") except requests.exceptions.RequestException as e: if Path(url).exists() and Path(url).is_file(): # If it's a local file, copy to destination shutil.copy(url, str(dest_path)) msg.good(f""Copied local asset {dest}"") else: msg.fail(f""Download failed: {dest}"", e) if checksum and checksum != get_checksum(dest_path): msg.fail(f""Checksum doesn't match value defined in {PROJECT_FILE}: {dest}"") " 13001,"def parse_draftjs_content_to_string(definitions: dict): string = """" blocks = definitions.get(""blocks"") if not blocks or not isinstance(blocks, list): return """" for block in blocks: text = block.get(""text"") if not text: continue string += ""{} "".format(text) return string ","def parse_draftjs_content_to_string(definitions: dict): string = """" blocks = definitions.get(""blocks"") if not blocks or not isinstance(blocks, list): return """" for block in blocks: text = block.get(""text"") if not text: continue string += f""{text} "" return string " 11835,"def pilinfo(out=None, brief=False): if out is None: out = sys.stdout Image.init() 
print(""-"" * 68, file=out) print(""Pillow {}"".format(PIL.__version__), file=out) print(""-"" * 68, file=out) print( ""Python modules loaded from {}"".format(os.path.dirname(Image.__file__)), file=out, ) print( ""Binary modules loaded from {}"".format(os.path.dirname(Image.core.__file__)), file=out, ) print(""-"" * 68, file=out) if not brief: v = sys.version.splitlines() print(""Python {}"".format(v[0].strip()), file=out) for v in v[1:]: print("" {}"".format(v.strip()), file=out) print(""-"" * 68, file=out) for name, feature in [ (""pil"", ""PIL CORE""), (""tkinter"", ""TKINTER""), (""freetype2"", ""FREETYPE2""), (""littlecms2"", ""LITTLECMS2""), (""webp"", ""WEBP""), (""transp_webp"", ""WEBP Transparency""), (""webp_mux"", ""WEBPMUX""), (""webp_anim"", ""WEBP Animation""), (""jpg"", ""JPEG""), (""jpg_2000"", ""OPENJPEG (JPEG2000)""), (""zlib"", ""ZLIB (PNG/ZIP)""), (""libtiff"", ""LIBTIFF""), (""raqm"", ""RAQM (Bidirectional Text)""), (""libimagequant"", ""LIBIMAGEQUANT (quantization method)""), ]: if check(name): print(""---"", feature, ""support ok"", file=out) else: print(""***"", feature, ""support not installed"", file=out) print(""-"" * 68, file=out) if not brief: extensions = collections.defaultdict(list) for ext, i in Image.EXTENSION.items(): extensions[i].append(ext) for i in sorted(Image.ID): line = ""{}"".format(i) if i in Image.MIME: line = ""{} {}"".format(line, Image.MIME[i]) print(line, file=out) if i in extensions: print( ""Extensions: {}"".format("", "".join(sorted(extensions[i]))), file=out ) features = [] if i in Image.OPEN: features.append(""open"") if i in Image.SAVE: features.append(""save"") if i in Image.SAVE_ALL: features.append(""save_all"") if i in Image.DECODERS: features.append(""decode"") if i in Image.ENCODERS: features.append(""encode"") print(""Features: {}"".format("", "".join(features)), file=out) print(""-"" * 68, file=out) ","def pilinfo(out=None, brief=False): if out is None: out = sys.stdout Image.init() print(""-"" * 68, file=out) print(""Pillow {}"".format(PIL.__version__), file=out) print(""-"" * 68, file=out) print( ""Python modules loaded from {}"".format(os.path.dirname(Image.__file__)), file=out, ) print( ""Binary modules loaded from {}"".format(os.path.dirname(Image.core.__file__)), file=out, ) print(""-"" * 68, file=out) if not brief: v = sys.version.splitlines() print(""Python {}"".format(v[0].strip()), file=out) for v in v[1:]: print("" {}"".format(v.strip()), file=out) print(""-"" * 68, file=out) for name, feature in [ (""pil"", ""PIL CORE""), (""tkinter"", ""TKINTER""), (""freetype2"", ""FREETYPE2""), (""littlecms2"", ""LITTLECMS2""), (""webp"", ""WEBP""), (""transp_webp"", ""WEBP Transparency""), (""webp_mux"", ""WEBPMUX""), (""webp_anim"", ""WEBP Animation""), (""jpg"", ""JPEG""), (""jpg_2000"", ""OPENJPEG (JPEG2000)""), (""zlib"", ""ZLIB (PNG/ZIP)""), (""libtiff"", ""LIBTIFF""), (""raqm"", ""RAQM (Bidirectional Text)""), (""libimagequant"", ""LIBIMAGEQUANT (Quantization method)""), ]: if check(name): print(""---"", feature, ""support ok"", file=out) else: print(""***"", feature, ""support not installed"", file=out) print(""-"" * 68, file=out) if not brief: extensions = collections.defaultdict(list) for ext, i in Image.EXTENSION.items(): extensions[i].append(ext) for i in sorted(Image.ID): line = ""{}"".format(i) if i in Image.MIME: line = ""{} {}"".format(line, Image.MIME[i]) print(line, file=out) if i in extensions: print( ""Extensions: {}"".format("", "".join(sorted(extensions[i]))), file=out ) features = [] if i in Image.OPEN: 
features.append(""open"") if i in Image.SAVE: features.append(""save"") if i in Image.SAVE_ALL: features.append(""save_all"") if i in Image.DECODERS: features.append(""decode"") if i in Image.ENCODERS: features.append(""encode"") print(""Features: {}"".format("", "".join(features)), file=out) print(""-"" * 68, file=out) " 1332,"def set_config(assume_finite=None, working_memory=None, print_changed_only=None): """"""Set global scikit-learn configuration .. versionadded:: 0.19 Parameters ---------- assume_finite : bool, optional If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. Global default: False. .. versionadded:: 0.19 working_memory : int, optional If set, scikit-learn will attempt to limit the size of temporary arrays to this number of MiB (per job when parallelised), often saving both computation time and memory on expensive operations that can be performed in chunks. Global default: 1024. .. versionadded:: 0.20 print_changed_only : bool, optional If True, only the parameters that were set to non-default values will be printed when printing an estimator. For example, ``print(SVC())`` while True will only print 'SVC()' while the default behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters. .. versionadded:: 0.21 See also -------- config_context """""" if assume_finite is not None: _global_config['assume_finite'] = assume_finite if working_memory is not None: _global_config['working_memory'] = working_memory if print_changed_only is not None: _global_config['print_changed_only'] = print_changed_only ","def set_config(assume_finite=None, working_memory=None, print_changed_only=None): """"""Set global scikit-learn configuration .. versionadded:: 0.19 Parameters ---------- assume_finite : bool, optional If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. Global default: False. .. versionadded:: 0.19 working_memory : int, optional If set, scikit-learn will attempt to limit the size of temporary arrays to this number of MiB (per job when parallelised), often saving both computation time and memory on expensive operations that can be performed in chunks. Global default: 1024. .. versionadded:: 0.20 print_changed_only : bool, optional If True, only the parameters that were set to non-default values will be printed when printing an estimator. For example, ``print(SVC())`` while True will only print 'SVC()' while the default behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters. .. versionadded:: 0.21 See Also -------- config_context """""" if assume_finite is not None: _global_config['assume_finite'] = assume_finite if working_memory is not None: _global_config['working_memory'] = working_memory if print_changed_only is not None: _global_config['print_changed_only'] = print_changed_only " 7144,"def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0, overlap=.5, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. 
Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional the minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. exclude_border : int or bool, optional If nonzero int, `exclude_border` excludes blobs from within `exclude_border`-pixels of the border of the image. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach Examples -------- >>> from skimage import data, feature >>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40) array([[ 267. , 359. , 16.777216], [ 267. , 115. , 10.48576 ], [ 263. , 302. , 16.777216], [ 263. , 245. , 16.777216], [ 261. , 173. , 16.777216], [ 260. , 46. , 16.777216], [ 198. , 155. , 10.48576 ], [ 196. , 43. , 10.48576 ], [ 195. , 102. , 16.777216], [ 194. , 277. , 16.777216], [ 193. , 213. , 16.777216], [ 185. , 347. , 16.777216], [ 128. , 154. , 10.48576 ], [ 127. , 102. , 10.48576 ], [ 125. , 208. , 10.48576 ], [ 125. , 45. , 16.777216], [ 124. , 337. , 10.48576 ], [ 120. , 272. , 16.777216], [ 58. , 100. , 10.48576 ], [ 54. , 276. , 10.48576 ], [ 54. , 42. , 16.777216], [ 52. , 216. , 16.777216], [ 52. , 155. , 16.777216], [ 45. , 336. , 16.777216]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """""" image = img_as_float(image) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. 
This broadcasts scalar kernels if isinstance(max_sigma, (int, float)): max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float) if isinstance(min_sigma, (int, float)): min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=np.float) max_sigma = np.asarray(max_sigma, dtype=np.float) # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # computing difference between two successive Gaussian blurred images # multiplying with average standard deviation provides scale invariance dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * np.mean(sigma_list[i]) for i in range(k)] image_cube = np.stack(dog_images, axis=-1) # local_maxima = get_local_maxima(image_cube, threshold) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] # if the gaussian is isotropic, the stdev across dimensions are # identical, so return only the stdev deviation of the first dimension if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,): sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) return _prune_blobs(lm, overlap) ","def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0, overlap=.5, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional the minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. 
exclude_border : int or bool, optional If nonzero int, `exclude_border` excludes blobs from within `exclude_border`-pixels of the border of the image. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach Examples -------- >>> from skimage import data, feature >>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40) array([[ 267. , 359. , 16.777216], [ 267. , 115. , 10.48576 ], [ 263. , 302. , 16.777216], [ 263. , 245. , 16.777216], [ 261. , 173. , 16.777216], [ 260. , 46. , 16.777216], [ 198. , 155. , 10.48576 ], [ 196. , 43. , 10.48576 ], [ 195. , 102. , 16.777216], [ 194. , 277. , 16.777216], [ 193. , 213. , 16.777216], [ 185. , 347. , 16.777216], [ 128. , 154. , 10.48576 ], [ 127. , 102. , 10.48576 ], [ 125. , 208. , 10.48576 ], [ 125. , 45. , 16.777216], [ 124. , 337. , 10.48576 ], [ 120. , 272. , 16.777216], [ 58. , 100. , 10.48576 ], [ 54. , 276. , 10.48576 ], [ 54. , 42. , 16.777216], [ 52. , 216. , 16.777216], [ 52. , 155. , 16.777216], [ 45. , 336. , 16.777216]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """""" image = img_as_float(image) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. 
This broadcasts scalar kernels if isinstance(max_sigma, (int, float)): max_sigma = np.full(image.ndim, max_sigma, dtype=float) if isinstance(min_sigma, (int, float)): min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=np.float) max_sigma = np.asarray(max_sigma, dtype=np.float) # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # computing difference between two successive Gaussian blurred images # multiplying with average standard deviation provides scale invariance dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * np.mean(sigma_list[i]) for i in range(k)] image_cube = np.stack(dog_images, axis=-1) # local_maxima = get_local_maxima(image_cube, threshold) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] # if the gaussian is isotropic, the stdev across dimensions are # identical, so return only the stdev deviation of the first dimension if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,): sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) return _prune_blobs(lm, overlap) " 59371,"def add_parser(subparsers, parent_parser): PLOTS_HELP = ( ""Commands to visualize and compare plot metrics in structured files "" ""(JSON, YAML, CSV, TSV)."" ) plots_parser = subparsers.add_parser( ""plots"", parents=[parent_parser], description=append_doc_link(PLOTS_HELP, ""plots""), help=PLOTS_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_subparsers = plots_parser.add_subparsers( dest=""cmd"", help=""Use `dvc plots CMD --help` to display command-specific help."", ) fix_subparsers(plots_subparsers) SHOW_HELP = ""Generate plots from metrics files."" plots_show_parser = plots_subparsers.add_parser( ""show"", parents=[parent_parser], description=append_doc_link(SHOW_HELP, ""plots/show""), help=SHOW_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_show_parser.add_argument( ""targets"", nargs=""*"", help=""Files to visualize (supports any file, "" ""even when not found as `plots` in `dvc.yaml`). 
"" ""Shows all plots by default."", ).complete = completion.FILE _add_props_arguments(plots_show_parser) _add_output_arguments(plots_show_parser) plots_show_parser.set_defaults(func=CmdPlotsShow) PLOTS_DIFF_HELP = ( ""Show multiple versions of plot metrics "" ""by plotting them in a single image."" ) plots_diff_parser = plots_subparsers.add_parser( ""diff"", parents=[parent_parser], description=append_doc_link(PLOTS_DIFF_HELP, ""plots/diff""), help=PLOTS_DIFF_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_diff_parser.add_argument( ""--targets"", nargs=""*"", help=( ""Specific plots file(s) to visualize "" ""(even if not found as `plots` in `dvc.yaml`). "" ""Shows all tracked plots by default."" ), metavar="""", ).complete = completion.FILE plots_diff_parser.add_argument( ""-e"", ""--experiment"", action=""store_true"", default=False, help=argparse.SUPPRESS, ) plots_diff_parser.add_argument( ""revisions"", nargs=""*"", default=None, help=""Git commits to plot from"" ) _add_props_arguments(plots_diff_parser) _add_output_arguments(plots_diff_parser) plots_diff_parser.set_defaults(func=CmdPlotsDiff) PLOTS_MODIFY_HELP = ( ""Modify display properties of plot metrics files. "" ""Does not affect image type plots."" ) plots_modify_parser = plots_subparsers.add_parser( ""modify"", parents=[parent_parser], description=append_doc_link(PLOTS_MODIFY_HELP, ""plots/modify""), help=PLOTS_MODIFY_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_modify_parser.add_argument( ""target"", help=""Metric file to set properties to"" ).complete = completion.FILE _add_props_arguments(plots_modify_parser) plots_modify_parser.add_argument( ""--unset"", nargs=""*"", metavar="""", help=""Unset one or more display properties."", ) plots_modify_parser.set_defaults(func=CmdPlotsModify) ","def add_parser(subparsers, parent_parser): PLOTS_HELP = ( ""Commands to visualize and compare plot metrics in structured files "" ""(JSON, YAML, CSV, TSV)."" ) plots_parser = subparsers.add_parser( ""plots"", parents=[parent_parser], description=append_doc_link(PLOTS_HELP, ""plots""), help=PLOTS_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_subparsers = plots_parser.add_subparsers( dest=""cmd"", help=""Use `dvc plots CMD --help` to display command-specific help."", ) fix_subparsers(plots_subparsers) SHOW_HELP = ""Generate plots from metrics files."" plots_show_parser = plots_subparsers.add_parser( ""show"", parents=[parent_parser], description=append_doc_link(SHOW_HELP, ""plots/show""), help=SHOW_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_show_parser.add_argument( ""targets"", nargs=""*"", help=""Files to visualize (supports any file, "" ""even when not found as `plots` in `dvc.yaml`). "" ""Shows all plots by default."", ).complete = completion.FILE _add_props_arguments(plots_show_parser) _add_output_arguments(plots_show_parser) plots_show_parser.set_defaults(func=CmdPlotsShow) PLOTS_DIFF_HELP = ( ""Show multiple versions of plot metrics "" ""by plotting them in a single image."" ) plots_diff_parser = plots_subparsers.add_parser( ""diff"", parents=[parent_parser], description=append_doc_link(PLOTS_DIFF_HELP, ""plots/diff""), help=PLOTS_DIFF_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_diff_parser.add_argument( ""--targets"", nargs=""*"", help=( ""Specific plots file(s) to visualize "" ""(even if not found as `plots` in `dvc.yaml`). 
"" ""Shows all tracked plots by default."" ), metavar="""", ).complete = completion.FILE plots_diff_parser.add_argument( ""-e"", ""--experiment"", action=""store_true"", default=False, help=argparse.SUPPRESS, ) plots_diff_parser.add_argument( ""revisions"", nargs=""*"", default=None, help=""Git commits to plot from"" ) _add_props_arguments(plots_diff_parser) _add_output_arguments(plots_diff_parser) plots_diff_parser.set_defaults(func=CmdPlotsDiff) PLOTS_MODIFY_HELP = ( ""Modify display properties of data-series plots "" ""(has no effect on image-type plots)."" ) plots_modify_parser = plots_subparsers.add_parser( ""modify"", parents=[parent_parser], description=append_doc_link(PLOTS_MODIFY_HELP, ""plots/modify""), help=PLOTS_MODIFY_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) plots_modify_parser.add_argument( ""target"", help=""Metric file to set properties to"" ).complete = completion.FILE _add_props_arguments(plots_modify_parser) plots_modify_parser.add_argument( ""--unset"", nargs=""*"", metavar="""", help=""Unset one or more display properties."", ) plots_modify_parser.set_defaults(func=CmdPlotsModify) " 3925,"def lexicographical_topological_sort(G, key=None): """"""Generates a unique ordering of nodes by first sorting topologically (for which there are often multiple valid orderings) and then additionally by sorting lexicographically. A topological sort arranges the nodes of a directed graph so that the upstream node of each directed edge precedes the downstream node. It is always possible to find a solution for directed graphs that have no cycles. There may be more than one valid solution. Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the topological sort and to determine a single, unique ordering. This can be useful in comparing sort results. The lexicographical order can be customized by providing a function to the `key=` parameter. The definition of the key function is the same as used in python's built-in `sort()`. The function takes a single argument and returns a key to use for sorting purposes. Lexicographical sorting can fail if the node names are un-sortable. See the example below. The solution is to provide a function to the `key=` argument that returns sortable keys. Parameters ---------- G : NetworkX digraph A directed acyclic graph (DAG) key : function, optional A function of one argument that converts a node name to a comparison key. Use to resolve ambiguities in the sort order. Defaults to the identity function. Yields ------ nodes Yields the nodes of G in lexicographical topological sort order. Raises ------ NetworkXError Topological sort is defined for directed graphs only. If the graph `G` is undirected, a :exc:`NetworkXError` is raised. NetworkXUnfeasible If `G` is not a directed acyclic graph (DAG) no topological sort exists and a :exc:`NetworkXUnfeasible` exception is raised. This can also be raised if `G` is changed while the returned iterator is being processed RuntimeError If `G` is changed while the returned iterator is being processed. TypeError Results from un-sortable node names. Consider using `key=` parameter to resolve ambiguities in the sort order. Examples -------- >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)]) >>> list(nx.lexicographical_topological_sort(DG)) [2, 1, 3, 5, 4] >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x)) [2, 5, 1, 4, 3] The sort will fail for this graph because the comparison of integers to strings is not defined in python. 
Is 3 greater or less than 'red'? >>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')]) >>> list(nx.lexicographical_topological_sort(DG)) Traceback (most recent call last): ... TypeError: '<' not supported between instances of 'str' and 'int' ... The solution is to provide a function that returns keys that do compare. There are many ways to write a `key` function. This one returns a tuple where the first element is True for `str`, False otherwise. The second element is the node name. This groups the strings and integers separately so they can be compared only among themselves. >>> key = lambda node: (isinstance(node, str), node) >>> list(nx.lexicographical_topological_sort(DG, key=key)) [1, 2, 3, 'blue', 'green', 'red'] Notes ----- This algorithm is based on a description and proof in ""Introduction to Algorithms: A Creative Approach"" [1]_ . See also -------- topological_sort References ---------- .. [1] Manber, U. (1989). *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. """""" if not G.is_directed(): msg = ""Topological sort not defined on undirected graphs."" raise nx.NetworkXError(msg) if key is None: def key(node): return node nodeid_map = {n: i for i, n in enumerate(G)} def create_tuple(node): return key(node), nodeid_map[node], node indegree_map = {v: d for v, d in G.in_degree() if d > 0} # These nodes have zero indegree and ready to be returned. zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0] heapq.heapify(zero_indegree) while zero_indegree: _, _, node = heapq.heappop(zero_indegree) if node not in G: raise RuntimeError(""Graph changed during iteration"") for _, child in G.edges(node): try: indegree_map[child] -= 1 except KeyError as err: raise RuntimeError(""Graph changed during iteration"") from err if indegree_map[child] == 0: try: heapq.heappush(zero_indegree, create_tuple(child)) except TypeError as err: raise TypeError( f""{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order."" ) del indegree_map[child] yield node if indegree_map: msg = ""Graph contains a cycle or graph changed during iteration"" raise nx.NetworkXUnfeasible(msg) ","def lexicographical_topological_sort(G, key=None): """"""Generates a unique ordering of nodes by first sorting topologically (for which there are often multiple valid orderings) and then additionally by sorting lexicographically. A topological sort arranges the nodes of a directed graph so that the upstream node of each directed edge precedes the downstream node. It is always possible to find a solution for directed graphs that have no cycles. There may be more than one valid solution. Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the topological sort and to determine a single, unique ordering. This can be useful in comparing sort results. The lexicographical order can be customized by providing a function to the `key=` parameter. The definition of the key function is the same as used in python's built-in `sort()`. The function takes a single argument and returns a key to use for sorting purposes. Lexicographical sorting can fail if the node names are un-sortable. See the example below. The solution is to provide a function to the `key=` argument that returns sortable keys. Parameters ---------- G : NetworkX digraph A directed acyclic graph (DAG) key : function, optional A function of one argument that converts a node name to a comparison key. Use to resolve ambiguities in the sort order. Defaults to the identity function. 
Yields ------ nodes Yields the nodes of G in lexicographical topological sort order. Raises ------ NetworkXError Topological sort is defined for directed graphs only. If the graph `G` is undirected, a :exc:`NetworkXError` is raised. NetworkXUnfeasible If `G` is not a directed acyclic graph (DAG) no topological sort exists and a :exc:`NetworkXUnfeasible` exception is raised. This can also be raised if `G` is changed while the returned iterator is being processed RuntimeError If `G` is changed while the returned iterator is being processed. TypeError Results from un-sortable node names. Consider using `key=` parameter to resolve ambiguities in the sort order. Examples -------- >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)]) >>> list(nx.lexicographical_topological_sort(DG)) [2, 1, 3, 5, 4] >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x)) [2, 5, 1, 4, 3] The sort will fail for any graph with integer and string nodes. Comparison of integer to strings is not defined in python. Is 3 greater or less than 'red'? >>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')]) >>> list(nx.lexicographical_topological_sort(DG)) Traceback (most recent call last): ... TypeError: '<' not supported between instances of 'str' and 'int' ... The solution is to provide a function that returns keys that do compare. There are many ways to write a `key` function. This one returns a tuple where the first element is True for `str`, False otherwise. The second element is the node name. This groups the strings and integers separately so they can be compared only among themselves. >>> key = lambda node: (isinstance(node, str), node) >>> list(nx.lexicographical_topological_sort(DG, key=key)) [1, 2, 3, 'blue', 'green', 'red'] Notes ----- This algorithm is based on a description and proof in ""Introduction to Algorithms: A Creative Approach"" [1]_ . See also -------- topological_sort References ---------- .. [1] Manber, U. (1989). *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. """""" if not G.is_directed(): msg = ""Topological sort not defined on undirected graphs."" raise nx.NetworkXError(msg) if key is None: def key(node): return node nodeid_map = {n: i for i, n in enumerate(G)} def create_tuple(node): return key(node), nodeid_map[node], node indegree_map = {v: d for v, d in G.in_degree() if d > 0} # These nodes have zero indegree and ready to be returned. zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0] heapq.heapify(zero_indegree) while zero_indegree: _, _, node = heapq.heappop(zero_indegree) if node not in G: raise RuntimeError(""Graph changed during iteration"") for _, child in G.edges(node): try: indegree_map[child] -= 1 except KeyError as err: raise RuntimeError(""Graph changed during iteration"") from err if indegree_map[child] == 0: try: heapq.heappush(zero_indegree, create_tuple(child)) except TypeError as err: raise TypeError( f""{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order."" ) del indegree_map[child] yield node if indegree_map: msg = ""Graph contains a cycle or graph changed during iteration"" raise nx.NetworkXUnfeasible(msg) " 34872,"def keras_op_to_relay(inexpr, keras_layer, outname, etab): """"""Convert keras layer to relay expr, and update etab. 
Parameters ---------- inexpr : relay.expr.Expr or a list of it The input relay expr(s) keras_layer : keras.layers The keras layer to be converted outname : str Name of the output relay expr etab : relay.frontend.common.ExprTable The global expr table to be updated """""" if type(keras_layer).__name__ not in _convert_map: raise NotImplementedError(""{} is not supported"".format((type(keras_layer).__name__))) outs = _convert_map[type(keras_layer).__name__](inexpr, keras_layer, etab) outs = _as_list(outs) for t_idx, out in enumerate(outs): name = outname + "":"" + str(t_idx) etab.set_expr(name, out) ","def keras_op_to_relay(inexpr, keras_layer, outname, etab): """"""Convert keras layer to relay expr, and update etab. Parameters ---------- inexpr : relay.expr.Expr or a list of it The input relay expr(s) keras_layer : keras.layers The keras layer to be converted outname : str Name of the output relay expr etab : relay.frontend.common.ExprTable The global expression table to be updated. """""" if type(keras_layer).__name__ not in _convert_map: raise NotImplementedError(""{} is not supported"".format((type(keras_layer).__name__))) outs = _convert_map[type(keras_layer).__name__](inexpr, keras_layer, etab) outs = _as_list(outs) for t_idx, out in enumerate(outs): name = outname + "":"" + str(t_idx) etab.set_expr(name, out) " 30503,"def main(): params = {k: v for k, v in demisto.params().items() if v is not None} subfeeds = ['all', 'ssh', 'mail', 'apache', 'imap', 'ftp', 'sip', 'bots', 'strongips', 'ircbot', 'bruteforcelogin'] feed_types = dict() for subfeed in subfeeds: feed_types[F'https://lists.blocklist.de/lists/{subfeed}.txt'] = { 'indicator_type': FeedIndicatorType.IP, } params['feed_url_to_config'] = feed_types chosen_subfeeds = list() for subfeed in argToList(demisto.params().get('subfeeds', [])): chosen_subfeeds.append(F'https://lists.blocklist.de/lists/{subfeed}.txt') params['url'] = chosen_subfeeds # Call the main execution of the HTTP API module. feed_main('Blocklist_de Feed', params, 'blocklist_de-') ","def main(): params = {k: v for k, v in demisto.params().items() if v is not None} subfeeds = ['all', 'ssh', 'mail', 'apache', 'imap', 'ftp', 'sip', 'bots', 'strongips', 'bruteforcelogin'] feed_types = dict() for subfeed in subfeeds: feed_types[F'https://lists.blocklist.de/lists/{subfeed}.txt'] = { 'indicator_type': FeedIndicatorType.IP, } params['feed_url_to_config'] = feed_types chosen_subfeeds = list() for subfeed in argToList(demisto.params().get('subfeeds', [])): chosen_subfeeds.append(F'https://lists.blocklist.de/lists/{subfeed}.txt') params['url'] = chosen_subfeeds # Call the main execution of the HTTP API module. 
feed_main('Blocklist_de Feed', params, 'blocklist_de-') " 25046,"def _is_invalid_base_class(cls: InferenceResult) -> bool: return cls.name in INVALID_BASE_CLASSES and is_builtin_object(cls) ","def _is_invalid_base_class(cls: nodes.ClassDef) -> bool: return cls.name in INVALID_BASE_CLASSES and is_builtin_object(cls) " 29820,"def _get_comments_for_key(data: CommentedMap, key: Any) -> Optional[str]: # this is a little weird, but ruamel is returning a list that looks like: # [None, None, CommentToken(...), None] for some reason instead of just a # single string # Sometimes ruamel returns a recursive list of as well that looks like # [None, None, [CommentToken(...),CommentToken(...),None], CommentToken(...), None] def _flatten_comments(comments): for comment in comments: if comment is None: continue if isinstance(comment, list): yield from _flatten_comments(comment) else: yield comment.value raw_comments = [*_flatten_comments(data.ca.items.get(key, []))] if not raw_comments: # return None so that we don't return an empty string below if there really aren't # any comments return None # joining all comments together before returning them comment = """".join(raw_comments) return comment ","def _get_comments_for_key(data: CommentedMap, key: Any) -> Optional[str]: # this is a little weird, but ruamel is returning a list that looks like: # [None, None, CommentToken(...), None] for some reason instead of just a # single string # Sometimes ruamel returns a recursive list of CommentTokens as well that looks like # [None, None, [CommentToken(...),CommentToken(...),None], CommentToken(...), None] def _flatten_comments(comments): for comment in comments: if comment is None: continue if isinstance(comment, list): yield from _flatten_comments(comment) else: yield comment.value raw_comments = [*_flatten_comments(data.ca.items.get(key, []))] if not raw_comments: # return None so that we don't return an empty string below if there really aren't # any comments return None # joining all comments together before returning them comment = """".join(raw_comments) return comment " 32444,"def main() -> None: try: base_url = demisto.params()[""api_url""] org_name = demisto.params()[""url""].split(""."")[0].replace(""https://"", """") api_key = demisto.params().get(""api_key"", {}).get(""password"") secret_key = demisto.params().get(""secret_key"", {}).get(""password"") client = BreachRxClient(base_url, api_key, secret_key, org_name) command_func: Any[Callable, None] = COMMANDS.get(demisto.command()) if command_func is not None: return_results(command_func(client, **demisto.args())) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Failed to execute {demisto.command()} command.\nError:\n{str(e)}"") ","def main() -> None: # pragma: no cover try: base_url = demisto.params()[""api_url""] org_name = demisto.params()[""url""].split(""."")[0].replace(""https://"", """") api_key = demisto.params().get(""api_key"", {}).get(""password"") secret_key = demisto.params().get(""secret_key"", {}).get(""password"") client = BreachRxClient(base_url, api_key, secret_key, org_name) command_func: Any[Callable, None] = COMMANDS.get(demisto.command()) if command_func is not None: return_results(command_func(client, **demisto.args())) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Failed to execute {demisto.command()} command.\nError:\n{str(e)}"") " 41947,"def test_conversion_from_distribution_to_dimension() -> None: sampler = optuna.integration.SkoptSampler() study = 
optuna.create_study(sampler=sampler) with patch(""skopt.Optimizer"") as mock_object: study.optimize(_objective, n_trials=2, catch=()) dimensions = [ # Original: trial.suggest_float('p0', -3.3, 5.2) space.Real(-3.3, 5.2), # Original: trial.suggest_float('p1', 2.0, 2.0) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. # Original: trial.suggest_float('p9', 2.2, 2.2, size=0.5) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. # Original: trial.suggest_categorical('p10', ['9', '3', '0', '8']) space.Categorical((""9"", ""3"", ""0"", ""8"")), # Original: trial.suggest_float('p2', 0.0001, 0.3, log=True) space.Real(0.0001, 0.3, prior=""log-uniform""), # Original: trial.suggest_float('p3', 1.1, 1.1, log=True) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. # Original: trial.suggest_int('p4', -100, 8) space.Integer(0, 108), # Original: trial.suggest_int('p5', -20, -20) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. # Original: trial.suggest_int('p6', 1, 8, log=True) space.Real(0.5, 8.5, prior=""log-uniform""), # Original: trial.suggest_float('p7', 10, 20, step=2) space.Integer(0, 5), # Original: trial.suggest_float('p8', 0.1, 1.0, step=0.1) space.Integer(0, 8), ] assert mock_object.mock_calls[0] == call(dimensions) ","def test_conversion_from_distribution_to_dimension() -> None: sampler = optuna.integration.SkoptSampler() study = optuna.create_study(sampler=sampler) with patch(""skopt.Optimizer"") as mock_object: study.optimize(_objective, n_trials=2, catch=()) dimensions = [ # Original: trial.suggest_float('p0', -3.3, 5.2) space.Real(-3.3, 5.2), # Original: trial.suggest_float('p1', 2.0, 2.0) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. # Original: trial.suggest_float('p9', 2.2, 2.2, step=0.5) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. # Original: trial.suggest_categorical('p10', ['9', '3', '0', '8']) space.Categorical((""9"", ""3"", ""0"", ""8"")), # Original: trial.suggest_float('p2', 0.0001, 0.3, log=True) space.Real(0.0001, 0.3, prior=""log-uniform""), # Original: trial.suggest_float('p3', 1.1, 1.1, log=True) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. # Original: trial.suggest_int('p4', -100, 8) space.Integer(0, 108), # Original: trial.suggest_int('p5', -20, -20) # => Skipped because `skopt.Optimizer` cannot handle an empty `Real` dimension. 
# Original: trial.suggest_int('p6', 1, 8, log=True) space.Real(0.5, 8.5, prior=""log-uniform""), # Original: trial.suggest_float('p7', 10, 20, step=2) space.Integer(0, 5), # Original: trial.suggest_float('p8', 0.1, 1.0, step=0.1) space.Integer(0, 8), ] assert mock_object.mock_calls[0] == call(dimensions) " 30551,"def get_all_incidents(from_date): contents = demisto.executeCommand(""getIncidents"", {""fromdate"": from_date})[0]['Contents'] incidents = contents['data'] size = len(incidents) total = contents['total'] page = 1 while total > size: contents = demisto.executeCommand(""getIncidents"", {""fromdate"": from_date, ""page"": page})[0]['Contents'] new_incidents = contents['data'] incidents = incidents + new_incidents size = len(incidents) page = page + 1 return incidents ","def get_all_incidents(from_date): contents = demisto.executeCommand(""getIncidents"", {""fromdate"": from_date})[0]['Contents'] incidents = contents['data'] size = len(incidents) total = contents['total'] page = 1 while total > size: contents = demisto.executeCommand(""getIncidents"", {""fromdate"": from_date, ""page"": page})[0]['Contents'] new_incidents = contents['data'] incidents += new_incidents size = len(incidents) page = page + 1 return incidents " 39368,"def vtk_points(points, deep=True, force_float=False): """"""Convert numpy array or array-like to a ``vtkPoints`` object. Parameters ---------- points : numpy.ndarray or sequence Points to convert. Should be 1 or 2 dimensional. Accepts a single point or several points. deep : bool, optional Perform a deep copy of the array. Only applicable if ``points`` is a :class:`numpy.ndarray`. force_float : bool, optional Casts the datatype to float32 if points datatype are non-float. Set this to ``False`` to allow non-float types, though this may lead to errors when transforming datasets. Returns ------- vtk.vtkPoints The vtkPoints object. Examples -------- >>> import pyvista >>> import numpy as np >>> points = np.random.random((10, 3)) >>> vpoints = pyvista.vtk_points(points) >>> vpoints # doctest:+SKIP (vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40 """""" points = np.asanyarray(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') if force_float: if not np.issubdtype(points.dtype, np.floating): warnings.warn( 'Points is not a float type. This can cause issues when ' 'transforming or applying filters. Casting to ' '``np.float32``. Disable this by passing ' '``float_float=False``' ) points = points.astype(np.float32) # check dimensionality if points.ndim == 1: points = points.reshape(-1, 3) elif points.ndim > 2: raise ValueError('Dimension of ``points`` should be 1 or 2, not ' f'{points.ndim}') # verify shape if points.shape[1] != 3: raise ValueError('Points array must contain three values per point. ' f'Shape is {points.shape} and should be (X, 3)') # points must be contiguous points = np.require(points, requirements=['C']) vtkpts = _vtk.vtkPoints() vtk_arr = _vtk.numpy_to_vtk(points, deep=deep) vtkpts.SetData(vtk_arr) return vtkpts ","def vtk_points(points, deep=True, force_float=False): """"""Convert numpy array or array-like to a ``vtkPoints`` object. Parameters ---------- points : numpy.ndarray or sequence Points to convert. Should be 1 or 2 dimensional. Accepts a single point or several points. deep : bool, optional Perform a deep copy of the array. Only applicable if ``points`` is a :class:`numpy.ndarray`. 
force_float : bool, optional Casts the datatype to float32 if points datatype are non-float. Set this to ``False`` to allow non-float types, though this may lead to errors when transforming datasets. Returns ------- vtk.vtkPoints The vtkPoints object. Examples -------- >>> import pyvista >>> import numpy as np >>> points = np.random.random((10, 3)) >>> vpoints = pyvista.vtk_points(points) >>> vpoints # doctest:+SKIP (vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40 """""" points = np.asanyarray(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') if force_float: if not np.issubdtype(points.dtype, np.floating): warnings.warn( 'Points is not a float type. This can cause issues when ' 'transforming or applying filters. Casting to ' '``np.float32``. Disable this by passing ' '``force_float=False``.' ) points = points.astype(np.float32) # check dimensionality if points.ndim == 1: points = points.reshape(-1, 3) elif points.ndim > 2: raise ValueError('Dimension of ``points`` should be 1 or 2, not ' f'{points.ndim}') # verify shape if points.shape[1] != 3: raise ValueError('Points array must contain three values per point. ' f'Shape is {points.shape} and should be (X, 3)') # points must be contiguous points = np.require(points, requirements=['C']) vtkpts = _vtk.vtkPoints() vtk_arr = _vtk.numpy_to_vtk(points, deep=deep) vtkpts.SetData(vtk_arr) return vtkpts " 39108,"def convert_to_schema(schema: list) -> List[bigquery.SchemaField]: """"""Read the schema as a JSON and reformats as an array. Args: schema: list of dicts to convert to list of SchemaField Returns: List of bigquery.SchemaField objects holding the schema. """""" input_fields = schema schema = [] for input_field in input_fields: schema.append( bigquery.SchemaField(input_field['name'], input_field['type'], mode=input_field['mode'])) return schema ","def convert_to_schema(schema: List[Dict[str, str]]) -> List[bigquery.SchemaField]: """"""Read the schema as a JSON and reformats as an array. Args: schema: list of dicts to convert to list of SchemaField Returns: List of bigquery.SchemaField objects holding the schema. """""" input_fields = schema schema = [] for input_field in input_fields: schema.append( bigquery.SchemaField(input_field['name'], input_field['type'], mode=input_field['mode'])) return schema " 55759,"def test_is_default_color(): """"""Test labels layer default color for None and background"""""" data = np.random.randint(20, size=(10, 15)) layer = Labels(data) # layer gets instantiated with defaults current_color = layer.color assert layer._is_default_colors(current_color) # setting color to default colors doesn't update color mode layer.color = current_color assert layer.color_mode == 'auto' # new colors are not default new_color = {0: 'white', 1: 'red', 3: 'green'} assert not layer._is_default_colors(new_color) # setting the color with non-default colors updates color mode layer.color = new_color assert layer.color_mode == 'direct' ","def test_is_default_color(): """"""Test that setting Labels.color to default works. In previous versions of napari, setting `color` to be the default values would break rendering. Here we check that this works as expected. 
For more information see: - https://github.com/napari/napari/issues/2479 - https://github.com/napari/napari/issues/2953 """""" data = np.random.randint(20, size=(10, 15)) layer = Labels(data) # layer gets instantiated with defaults current_color = layer.color assert layer._is_default_colors(current_color) # setting color to default colors doesn't update color mode layer.color = current_color assert layer.color_mode == 'auto' # new colors are not default new_color = {0: 'white', 1: 'red', 3: 'green'} assert not layer._is_default_colors(new_color) # setting the color with non-default colors updates color mode layer.color = new_color assert layer.color_mode == 'direct' " 44348,"def steady_state(lindblad, sparse=None, method=""ed"", rho0=None, **kwargs): r""""""Computes the numerically exact steady-state of a lindblad master equation. The computation is performed either through the exact diagonalization of the hermitian L^\dagger L matrix, or by means of an iterative solver (bicgstabl) targeting the solution of the non-hermitian system L\rho = 0 && \Tr[\rho] = 1. Note that for systems with 7 or more sites it is usually computationally impossible to build the full lindblad operator and therefore only `iterative` will work. Note that for systems with hilbert spaces with dimensions above 40k, tol should be set to a lower value if the steady state has non-trivial correlations. Args: lindblad: The lindbladian encoding the master equation. sparse: Whever to use sparse matrices (default: False for ed, True for iterative) method: 'ed' (exact diagonalization) or 'iterative' (iterative bicgstabl) rho0: starting density matrix for the iterative diagonalization (default: None) kwargs...: additional kwargs passed to bicgstabl Optional args for iterative: For full docs please consult SciPy documentation at https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicgstab.html maxiter: maximum number of iterations for the iterative solver (default: None) tol: The precision for the calculation (default: 1e-05) callback: User-supplied function to call after each iteration. It is called as callback(xk), where xk is the current solution vector Returns: The steady-state density matrix. """""" from numpy import sqrt, array M = lindblad.hilbert.physical.n_states if method == ""ed"": if sparse is None: sparse = False if not sparse: from numpy.linalg import eigh lind_mat = lindblad.to_dense() ldagl = lind_mat.conj().T * lind_mat w, v = eigh(ldagl) else: from scipy.sparse.linalg import eigsh lind_mat = lindblad.to_sparse() ldagl = lind_mat.H * lind_mat w, v = eigsh(ldagl, which=""SM"", k=2) print(""Minimum eigenvalue is: "", w[0]) rho = array(v[:, 0].reshape((M, M))) rho = rho / rho.trace() elif method == ""iterative"": if sparse is None: sparse = True # An extra row is added at the bottom of the therefore M^2+1 long array, # with the trace of the density matrix. This is needed to enforce the # trace-1 condition. 
L = lindblad.to_linear_operator(sparse=sparse, append_trace=True) # Initial density matrix ( + trace condition) Lrho_start = np.zeros((M ** 2 + 1), dtype=L.dtype) if rho0 is None: Lrho_start[0] = 1.0 Lrho_start[-1] = 1.0 else: Lrho_start[:-1] = rho0.reshape(-1) Lrho_start[-1] = rho0.trace() # Target residual (everything 0 and trace 1) Lrho_target = np.zeros((M ** 2 + 1), dtype=L.dtype) Lrho_target[-1] = 1.0 # Iterative solver print(""Starting iterative solver..."") res, info = bicgstab(L, Lrho_target, x0=Lrho_start, **kwargs) rho = res[:-1].reshape((M, M)) if info == 0: print(""Converged trace is "", rho.trace()) elif info > 0: print(""Failed to converge after "", info, "" ( trace is "", rho.trace(), "" )"") elif info < 0: print(""An error occured: "", info) else: raise ValueError(""method must be 'ed' or 'iterative'"") return rho ","def steady_state(lindblad, *, sparse=None, method=""ed"", rho0=None, **kwargs): r""""""Computes the numerically exact steady-state of a lindblad master equation. The computation is performed either through the exact diagonalization of the hermitian L^\dagger L matrix, or by means of an iterative solver (bicgstabl) targeting the solution of the non-hermitian system L\rho = 0 && \Tr[\rho] = 1. Note that for systems with 7 or more sites it is usually computationally impossible to build the full lindblad operator and therefore only `iterative` will work. Note that for systems with hilbert spaces with dimensions above 40k, tol should be set to a lower value if the steady state has non-trivial correlations. Args: lindblad: The lindbladian encoding the master equation. sparse: Whever to use sparse matrices (default: False for ed, True for iterative) method: 'ed' (exact diagonalization) or 'iterative' (iterative bicgstabl) rho0: starting density matrix for the iterative diagonalization (default: None) kwargs...: additional kwargs passed to bicgstabl Optional args for iterative: For full docs please consult SciPy documentation at https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicgstab.html maxiter: maximum number of iterations for the iterative solver (default: None) tol: The precision for the calculation (default: 1e-05) callback: User-supplied function to call after each iteration. It is called as callback(xk), where xk is the current solution vector Returns: The steady-state density matrix. """""" from numpy import sqrt, array M = lindblad.hilbert.physical.n_states if method == ""ed"": if sparse is None: sparse = False if not sparse: from numpy.linalg import eigh lind_mat = lindblad.to_dense() ldagl = lind_mat.conj().T * lind_mat w, v = eigh(ldagl) else: from scipy.sparse.linalg import eigsh lind_mat = lindblad.to_sparse() ldagl = lind_mat.H * lind_mat w, v = eigsh(ldagl, which=""SM"", k=2) print(""Minimum eigenvalue is: "", w[0]) rho = array(v[:, 0].reshape((M, M))) rho = rho / rho.trace() elif method == ""iterative"": if sparse is None: sparse = True # An extra row is added at the bottom of the therefore M^2+1 long array, # with the trace of the density matrix. This is needed to enforce the # trace-1 condition. 
L = lindblad.to_linear_operator(sparse=sparse, append_trace=True) # Initial density matrix ( + trace condition) Lrho_start = np.zeros((M ** 2 + 1), dtype=L.dtype) if rho0 is None: Lrho_start[0] = 1.0 Lrho_start[-1] = 1.0 else: Lrho_start[:-1] = rho0.reshape(-1) Lrho_start[-1] = rho0.trace() # Target residual (everything 0 and trace 1) Lrho_target = np.zeros((M ** 2 + 1), dtype=L.dtype) Lrho_target[-1] = 1.0 # Iterative solver print(""Starting iterative solver..."") res, info = bicgstab(L, Lrho_target, x0=Lrho_start, **kwargs) rho = res[:-1].reshape((M, M)) if info == 0: print(""Converged trace is "", rho.trace()) elif info > 0: print(""Failed to converge after "", info, "" ( trace is "", rho.trace(), "" )"") elif info < 0: print(""An error occured: "", info) else: raise ValueError(""method must be 'ed' or 'iterative'"") return rho " 26381,"def background_check_wrapper_changed(library, songs): for song in songs: if not s._needs_write: continue try: song._song.write() except AudioFileError as e: print_w(""Couldn't save song %s (%s)"" % (song(""~filename""), e)) _inform_library_of_changed(library, songs) ","def background_check_wrapper_changed(library, songs): for song in songs: if not song._needs_write: continue try: song._song.write() except AudioFileError as e: print_w(""Couldn't save song %s (%s)"" % (song(""~filename""), e)) _inform_library_of_changed(library, songs) " 21942,"def singular_values_plot(syslist, omega=None, plot=True, omega_limits=None, omega_num=None, *args, **kwargs): """"""Singular value plot for a system Plots a Singular Value plot for the system over a (optional) frequency range. Parameters ---------- syslist : linsys List of linear systems (single system is OK) omega : array_like List of frequencies in rad/sec to be used for frequency response plot : bool If True (default), plot magnitude and phase omega_limits : array_like of two values Limits of the to generate frequency vector. If Hz=True the limits are in Hz otherwise in rad/s. omega_num : int Number of samples to plot. Defaults to config.defaults['freqplot.number_of_samples']. Returns ------- sigma : ndarray (or list of ndarray if len(syslist) > 1)) singular values omega : ndarray (or list of ndarray if len(syslist) > 1)) frequency in rad/sec Other Parameters ---------------- grid : bool If True, plot grid lines on gain and phase plots. Default is set by `config.defaults['bode.grid']`. 
Examples -------- >>> den = [75, 1] >>> sys = ct.tf([[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]]) >>> omega = np.logspace(-4, 1, 1000) >>> sigma, omega = singular_values_plot(sys) """""" # Make a copy of the kwargs dictionary since we will modify it kwargs = dict(kwargs) # Check to see if legacy 'Plot' keyword was used if 'Plot' in kwargs: import warnings warnings.warn(""'Plot' keyword is deprecated in bode_plot; use 'plot'"", FutureWarning) # Map 'Plot' keyword to 'plot' keyword plot = kwargs.pop('Plot') # Get values for params (and pop from list to allow keyword use in plot) dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True) Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True) grid = config._get_param('bode', 'grid', kwargs, _bode_defaults, pop=True) plot = config._get_param('bode', 'grid', plot, True) # If argument was a singleton, turn it into a tuple if not hasattr(syslist, '__iter__'): syslist = (syslist,) # Decide whether to go above Nyquist frequency omega_range_given = True if omega is not None else False if omega is None: omega_num = config._get_param( 'freqplot', 'number_of_samples', omega_num) if omega_limits is None: # Select a default range if none is provided omega = _default_frequency_range(syslist, number_of_samples=omega_num) else: omega_range_given = True omega_limits = np.asarray(omega_limits) if len(omega_limits) != 2: raise ValueError(""len(omega_limits) must be 2"") if Hz: omega_limits *= 2. * math.pi omega = np.logspace(np.log10(omega_limits[0]), np.log10(omega_limits[1]), num=omega_num, endpoint=True) if plot: fig = plt.gcf() ax_sigma = None # Get the current axes if they already exist for ax in fig.axes: if ax.get_label() == 'control-sigma': ax_sigma = ax # If no axes present, create them from scratch if ax_sigma is None: plt.clf() ax_sigma = plt.subplot(111, label='control-sigma') color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color'] sigmas, omegas, nyquistfrqs = [], [], [] for idx_sys, sys in enumerate(syslist): omega_sys = np.asarray(omega) if sys.isdtime(strict=True): nyquistfrq = math.pi / sys.dt if not omega_range_given: # limit up to and including nyquist frequency omega_sys = np.hstack(( omega_sys[omega_sys < nyquistfrq], nyquistfrq)) else: nyquistfrq = None mag, phase, omega = sys.frequency_response(omega) fresp = mag * np.exp(1j * phase) #fresp = evalfr(sys, 1j * omega_sys) fresp = fresp.transpose((2, 0, 1)) sigma = np.linalg.svd(fresp, compute_uv=False) sigmas.append(sigma) omegas.append(omega_sys) nyquistfrqs.append(nyquistfrq) if plot: color = color_cycle[idx_sys % len(color_cycle)] nyquistfrq_plot = None if Hz: omega_plot = omega_sys / (2. * math.pi) if nyquistfrq: nyquistfrq_plot = nyquistfrq / (2. 
* math.pi) else: omega_plot = omega_sys if nyquistfrq: nyquistfrq_plot = nyquistfrq sigma_plot = sigma if dB: ax_sigma.semilogx(omega_plot, 20 * np.log10(sigma_plot), color=color, *args, **kwargs) else: ax_sigma.loglog(omega_plot, sigma_plot, color=color, *args, **kwargs) if nyquistfrq_plot is not None: ax_sigma.axvline(x=nyquistfrq_plot, color=color) # Add a grid to the plot + labeling ax_sigma.grid(grid, which='both') ax_sigma.set_ylabel(""Magnitude (dB)"" if dB else ""Magnitude"") ax_sigma.set_xlabel(""Frequency (Hz)"" if Hz else ""Frequency (rad/sec)"") if len(syslist) == 1: return sigmas[0], omegas[0] else: return sigmas, omegas # ","def singular_values_plot(syslist, omega=None, plot=True, omega_limits=None, omega_num=None, *args, **kwargs): """"""Singular value plot for a system Plots a Singular Value plot for the system over a (optional) frequency range. Parameters ---------- syslist : linsys List of linear systems (single system is OK) omega : array_like List of frequencies in rad/sec to be used for frequency response plot : bool If True (default), plot magnitude and phase omega_limits : array_like of two values Limits of the to generate frequency vector. If Hz=True the limits are in Hz otherwise in rad/s. omega_num : int Number of samples to plot. Defaults to config.defaults['freqplot.number_of_samples']. Returns ------- sigma : ndarray (or list of ndarray if len(syslist) > 1)) singular values omega : ndarray (or list of ndarray if len(syslist) > 1)) frequency in rad/sec Other Parameters ---------------- grid : bool If True, plot grid lines on gain and phase plots. Default is set by `config.defaults['bode.grid']`. Examples -------- >>> den = [75, 1] >>> sys = ct.tf([[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]]) >>> omega = np.logspace(-4, 1, 1000) >>> sigma, omega = singular_values_plot(sys) """""" # Make a copy of the kwargs dictionary since we will modify it kwargs = dict(kwargs) # Check to see if legacy 'Plot' keyword was used if 'Plot' in kwargs: import warnings warnings.warn(""'Plot' keyword is deprecated in bode_plot; use 'plot'"", FutureWarning) # Map 'Plot' keyword to 'plot' keyword plot = kwargs.pop('Plot') # Get values for params (and pop from list to allow keyword use in plot) dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True) Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True) grid = config._get_param('bode', 'grid', kwargs, _bode_defaults, pop=True) plot = config._get_param('bode', 'grid', plot, True) # If argument was a singleton, turn it into a tuple if not hasattr(syslist, '__iter__'): syslist = (syslist,) # Decide whether to go above Nyquist frequency omega_range_given = True if omega is not None else False if omega is None: omega_num = config._get_param( 'freqplot', 'number_of_samples', omega_num) if omega_limits is None: # Select a default range if none is provided omega = _default_frequency_range(syslist, number_of_samples=omega_num) else: omega_range_given = True omega_limits = np.asarray(omega_limits) if len(omega_limits) != 2: raise ValueError(""len(omega_limits) must be 2"") if Hz: omega_limits *= 2. 
* math.pi omega = np.logspace(np.log10(omega_limits[0]), np.log10(omega_limits[1]), num=omega_num, endpoint=True) if plot: fig = plt.gcf() ax_sigma = None # Get the current axes if they already exist for ax in fig.axes: if ax.get_label() == 'control-sigma': ax_sigma = ax # If no axes present, create them from scratch if ax_sigma is None: plt.clf() ax_sigma = plt.subplot(111, label='control-sigma') color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color'] sigmas, omegas, nyquistfrqs = [], [], [] for idx_sys, sys in enumerate(syslist): omega_sys = np.asarray(omega) if sys.isdtime(strict=True): nyquistfrq = math.pi / sys.dt if not omega_range_given: # limit up to and including nyquist frequency omega_sys = np.hstack(( omega_sys[omega_sys < nyquistfrq], nyquistfrq)) else: nyquistfrq = None fresp = sys(1j*omega if sys.isctime() else np.exp(1j * omega * sys.dt)) fresp = fresp.transpose((2, 0, 1)) sigma = np.linalg.svd(fresp, compute_uv=False) sigmas.append(sigma) omegas.append(omega_sys) nyquistfrqs.append(nyquistfrq) if plot: color = color_cycle[idx_sys % len(color_cycle)] nyquistfrq_plot = None if Hz: omega_plot = omega_sys / (2. * math.pi) if nyquistfrq: nyquistfrq_plot = nyquistfrq / (2. * math.pi) else: omega_plot = omega_sys if nyquistfrq: nyquistfrq_plot = nyquistfrq sigma_plot = sigma if dB: ax_sigma.semilogx(omega_plot, 20 * np.log10(sigma_plot), color=color, *args, **kwargs) else: ax_sigma.loglog(omega_plot, sigma_plot, color=color, *args, **kwargs) if nyquistfrq_plot is not None: ax_sigma.axvline(x=nyquistfrq_plot, color=color) # Add a grid to the plot + labeling ax_sigma.grid(grid, which='both') ax_sigma.set_ylabel(""Magnitude (dB)"" if dB else ""Magnitude"") ax_sigma.set_xlabel(""Frequency (Hz)"" if Hz else ""Frequency (rad/sec)"") if len(syslist) == 1: return sigmas[0], omegas[0] else: return sigmas, omegas # " 25188,"def build_class( name: str, basenames: Sequence[str] = (), doc: Optional[str] = None ) -> nodes.ClassDef: """"""Create and initialize an astroid ClassDef node."""""" node = nodes.ClassDef(name) basenodes: List[nodes.Name] = [] for base in basenames: basenode = nodes.Name(name=base) basenode.parent = node basenodes.append(basenode) node.postinit( bases=basenodes, body=[], decorators=None, doc_node=nodes.Const(value=doc) if doc else None, ) return node ","def build_class( name: str, basenames: Iterable[str] = (), doc: Optional[str] = None ) -> nodes.ClassDef: """"""Create and initialize an astroid ClassDef node."""""" node = nodes.ClassDef(name) basenodes: List[nodes.Name] = [] for base in basenames: basenode = nodes.Name(name=base) basenode.parent = node basenodes.append(basenode) node.postinit( bases=basenodes, body=[], decorators=None, doc_node=nodes.Const(value=doc) if doc else None, ) return node " 3360,"def _legacy_snql_query(params: Tuple[SnubaQuery, Hub, Mapping[str, str]]) -> RawResult: # Run the SnQL query and if something fails try the legacy version. 
query_data, thread_hub, headers = params query_params, forward, reverse = query_data try: snql_entity = query_params[""dataset""] query = json_to_snql(query_params, snql_entity) result = _raw_snql_query(query, Hub(thread_hub), headers) except urllib3.exceptions.HTTPError as err: raise SnubaError(err) return result, forward, reverse ","def _legacy_snql_query(params: Tuple[SnubaQuery, Hub, Mapping[str, str]]) -> RawResult: # Convert json query to SnQL and run it query_data, thread_hub, headers = params query_params, forward, reverse = query_data try: snql_entity = query_params[""dataset""] query = json_to_snql(query_params, snql_entity) result = _raw_snql_query(query, Hub(thread_hub), headers) except urllib3.exceptions.HTTPError as err: raise SnubaError(err) return result, forward, reverse " 31237,"def main(): install_logging('Prepare_Content_Packs_For_Testing.log') option = option_handler() packs_artifacts_path = option.artifacts_path extract_destination_path = option.extract_path storage_bucket_name = option.bucket_name service_account = option.service_account target_packs = option.pack_names if option.pack_names else """" build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4()) override_all_packs = option.override_all_packs signature_key = option.key_string id_set_path = option.id_set_path packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {} storage_base_path = option.storage_base_path remove_test_playbooks = option.remove_test_playbooks is_bucket_upload_flow = option.bucket_upload private_bucket_name = option.private_bucket_name circle_branch = option.circle_branch force_upload = option.force_upload # google cloud storage client initialized storage_client = init_storage_client(service_account) storage_bucket = storage_client.bucket(storage_bucket_name) if storage_base_path: GCPConfig.STORAGE_BASE_PATH = storage_base_path # download and extract index from public bucket index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket, extract_destination_path) # content repo client initialized content_repo = get_content_git_client(CONTENT_ROOT_PATH) current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path, is_bucket_upload_flow, circle_branch) # detect packs to upload pack_names = get_packs_names(target_packs, previous_commit_hash) extract_packs_artifacts(packs_artifacts_path, extract_destination_path) packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names if os.path.exists(os.path.join(extract_destination_path, pack_name))] if not option.override_all_packs: check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash, storage_bucket) # google cloud bigquery client initialized bq_client = init_bigquery_client(service_account) packs_statistic_df = get_packs_statistics_dataframe(bq_client) updated_private_packs = [] if private_bucket_name: # Add private packs to the index private_storage_bucket = storage_client.bucket(private_bucket_name) private_packs, _, _, updated_private_packs = update_index_with_priced_packs(private_storage_bucket, extract_destination_path, index_folder_path, pack_names) else: # skipping private packs logging.debug(""Skipping index update of priced packs"") private_packs = [] # clean index and gcs from non existing or invalid packs clean_non_existing_packs(index_folder_path, private_packs, storage_bucket) # starting iteration over 
packs for pack in packs_list: task_status, user_metadata = pack.load_user_metadata() if not task_status: pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value pack.cleanup() continue task_status, pack_content_items = pack.collect_content_items() if not task_status: pack.status = PackStatus.FAILED_COLLECT_ITEMS.name pack.cleanup() continue task_status, integration_images = pack.upload_integration_images(storage_bucket) if not task_status: pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name pack.cleanup() continue task_status, author_image = pack.upload_author_image(storage_bucket) if not task_status: pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name pack.cleanup() continue task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items, integration_images=integration_images, author_image=author_image, index_folder_path=index_folder_path, packs_dependencies_mapping=packs_dependencies_mapping, build_number=build_number, commit_hash=current_commit_hash, packs_statistic_df=packs_statistic_df) if not task_status: pack.status = PackStatus.FAILED_METADATA_PARSING.name pack.cleanup() continue task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number) if not task_status: pack.status = PackStatus.FAILED_RELEASE_NOTES.name pack.cleanup() continue if not_updated_build: pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name pack.cleanup() continue task_status = pack.remove_unwanted_files(remove_test_playbooks) if not task_status: pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS pack.cleanup() continue task_status = pack.sign_pack(signature_key) if not task_status: pack.status = PackStatus.FAILED_SIGNING_PACKS.name pack.cleanup() continue task_status, zip_pack_path = pack.zip_pack() if not task_status: pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name pack.cleanup() continue task_status, pack_was_modified = pack.detect_modified(content_repo, index_folder_path, current_commit_hash, previous_commit_hash) if not task_status: pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name pack.cleanup() continue (task_status, skipped_pack_uploading, full_pack_path) = \ pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket, override_all_packs or pack_was_modified) if not task_status: pack.status = PackStatus.FAILED_UPLOADING_PACK.name pack.cleanup() continue task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path) if not task_status: pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name pack.cleanup() continue # in case that pack already exist at cloud storage path and in index, skipped further steps if skipped_pack_uploading and exists_in_index: pack.status = PackStatus.PACK_ALREADY_EXISTS.name pack.cleanup() continue task_status = pack.prepare_for_index_upload() if not task_status: pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name pack.cleanup() continue task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path, pack_version=pack.latest_version, hidden_pack=pack.hidden) if not task_status: pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name pack.cleanup() continue pack.status = PackStatus.SUCCESS.name # upload core packs json to bucket upload_core_packs_config(storage_bucket, build_number, index_folder_path) # finished iteration over content packs upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path, 
index_blob=index_blob, build_number=build_number, private_packs=private_packs, current_commit_hash=current_commit_hash, index_generation=index_generation, force_upload=force_upload, previous_commit_hash=previous_commit_hash) # upload id_set.json to bucket upload_id_set(storage_bucket, id_set_path) # get the lists of packs divided by their status successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list) # Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE) store_successful_and_failed_packs_in_ci_artifacts( packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs, updated_private_packs ) # summary of packs status print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow) ","def main(): install_logging('Prepare_Content_Packs_For_Testing.log') option = option_handler() packs_artifacts_path = option.artifacts_path extract_destination_path = option.extract_path storage_bucket_name = option.bucket_name service_account = option.service_account target_packs = option.pack_names if option.pack_names else """" build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4()) override_all_packs = option.override_all_packs signature_key = option.key_string id_set_path = option.id_set_path packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {} storage_base_path = option.storage_base_path remove_test_playbooks = option.remove_test_playbooks is_bucket_upload_flow = option.bucket_upload private_bucket_name = option.private_bucket_name circle_branch = option.circle_branch force_upload = option.force_upload # google cloud storage client initialized storage_client = init_storage_client(service_account) storage_bucket = storage_client.bucket(storage_bucket_name) if storage_base_path: GCPConfig.STORAGE_BASE_PATH = storage_base_path # download and extract index from public bucket index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket, extract_destination_path) # content repo client initialized content_repo = get_content_git_client(CONTENT_ROOT_PATH) current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path, is_bucket_upload_flow, circle_branch) # detect packs to upload pack_names = get_packs_names(target_packs, previous_commit_hash) extract_packs_artifacts(packs_artifacts_path, extract_destination_path) packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names if os.path.exists(os.path.join(extract_destination_path, pack_name))] if not option.override_all_packs: check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash, storage_bucket) # google cloud bigquery client initialized bq_client = init_bigquery_client(service_account) packs_statistic_df = get_packs_statistics_dataframe(bq_client) updated_private_packs_ids = [] if private_bucket_name: # Add private packs to the index private_storage_bucket = storage_client.bucket(private_bucket_name) private_packs, _, _, updated_private_packs = update_index_with_priced_packs(private_storage_bucket, extract_destination_path, index_folder_path, pack_names) else: # skipping private packs logging.debug(""Skipping index update of priced packs"") private_packs = [] # clean 
index and gcs from non existing or invalid packs clean_non_existing_packs(index_folder_path, private_packs, storage_bucket) # starting iteration over packs for pack in packs_list: task_status, user_metadata = pack.load_user_metadata() if not task_status: pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value pack.cleanup() continue task_status, pack_content_items = pack.collect_content_items() if not task_status: pack.status = PackStatus.FAILED_COLLECT_ITEMS.name pack.cleanup() continue task_status, integration_images = pack.upload_integration_images(storage_bucket) if not task_status: pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name pack.cleanup() continue task_status, author_image = pack.upload_author_image(storage_bucket) if not task_status: pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name pack.cleanup() continue task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items, integration_images=integration_images, author_image=author_image, index_folder_path=index_folder_path, packs_dependencies_mapping=packs_dependencies_mapping, build_number=build_number, commit_hash=current_commit_hash, packs_statistic_df=packs_statistic_df) if not task_status: pack.status = PackStatus.FAILED_METADATA_PARSING.name pack.cleanup() continue task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number) if not task_status: pack.status = PackStatus.FAILED_RELEASE_NOTES.name pack.cleanup() continue if not_updated_build: pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name pack.cleanup() continue task_status = pack.remove_unwanted_files(remove_test_playbooks) if not task_status: pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS pack.cleanup() continue task_status = pack.sign_pack(signature_key) if not task_status: pack.status = PackStatus.FAILED_SIGNING_PACKS.name pack.cleanup() continue task_status, zip_pack_path = pack.zip_pack() if not task_status: pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name pack.cleanup() continue task_status, pack_was_modified = pack.detect_modified(content_repo, index_folder_path, current_commit_hash, previous_commit_hash) if not task_status: pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name pack.cleanup() continue (task_status, skipped_pack_uploading, full_pack_path) = \ pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket, override_all_packs or pack_was_modified) if not task_status: pack.status = PackStatus.FAILED_UPLOADING_PACK.name pack.cleanup() continue task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path) if not task_status: pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name pack.cleanup() continue # in case that pack already exist at cloud storage path and in index, skipped further steps if skipped_pack_uploading and exists_in_index: pack.status = PackStatus.PACK_ALREADY_EXISTS.name pack.cleanup() continue task_status = pack.prepare_for_index_upload() if not task_status: pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name pack.cleanup() continue task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path, pack_version=pack.latest_version, hidden_pack=pack.hidden) if not task_status: pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name pack.cleanup() continue pack.status = PackStatus.SUCCESS.name # upload core packs json to bucket upload_core_packs_config(storage_bucket, build_number, index_folder_path) # finished 
iteration over content packs upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path, index_blob=index_blob, build_number=build_number, private_packs=private_packs, current_commit_hash=current_commit_hash, index_generation=index_generation, force_upload=force_upload, previous_commit_hash=previous_commit_hash) # upload id_set.json to bucket upload_id_set(storage_bucket, id_set_path) # get the lists of packs divided by their status successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list) # Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE) store_successful_and_failed_packs_in_ci_artifacts( packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs, updated_private_packs ) # summary of packs status print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow) " 43665,"def excitations(n_electrons, n_spin_orbitals, delta_sz=0): r""""""Generates single and double excitations from a Hartree-Fock reference state. Single and double excitations can be generated by acting with the excitation operators :math:`\hat T_1` and :math:`\hat T_2` on the Hartree-Fock (HF) reference state: .. math: && \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in \mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF} \rangle \\ && \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in \mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle, where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied, referred to as virtual (virt), *spin-orbitals* and :math:`\hat c` and :math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively. | .. figure:: ../../_static/qchem/sd_excitations.png :align: center :width: 70% | Args: n_electrons (int): Number of electrons. If an active space is defined, 'n_electrons' is the number of active electrons. n_spin_orbitals (int): Number of spin-orbitals. If an active space is defined, 'n_spin_orbitals' is the number of active spin-orbitals. delta_sz (int): Specifies the selection rules ``sz[p] - sz[r] = delta_sz`` and ``sz[p] + sz[p] - sz[r] - sz[s] = delta_sz`` for the spin-projection ``sz`` of the orbitals involved in the single and double excitations, respectively. ``delta_sz`` can take the values :math:`0`, :math:`\pm 1` and :math:`\pm 2`. 
Returns: tuple(list, list): lists with the indices of the spin-orbitals involved in the single and double excitations **Example** >>> n_electrons = 2 >>> n_spin_orbitals = 4 >>> singles, doubles = excitations(n_electrons, n_spin_orbitals) >>> print(singles) [[0, 2], [1, 3]] >>> print(doubles) [[0, 1, 2, 3]] """""" if not n_electrons > 0: raise ValueError( ""The number of active electrons has to be greater than 0 \n"" ""Got n_electrons = {}"".format(n_electrons) ) if n_spin_orbitals <= n_electrons: raise ValueError( ""The number of active spin-orbitals ({}) "" ""has to be greater than the number of active electrons ({})."".format( n_spin_orbitals, n_electrons ) ) if delta_sz not in (0, 1, -1, 2, -2): raise ValueError( ""Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."".format(delta_sz) ) # define the single-particle state spin quantum number 'sz' sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_spin_orbitals)]) singles = [ [r, p] for r in range(n_electrons) for p in range(n_electrons, n_spin_orbitals) if sz[p] - sz[r] == delta_sz ] doubles = [ [s, r, q, p] for s in range(n_electrons - 1) for r in range(s + 1, n_electrons) for q in range(n_electrons, n_spin_orbitals - 1) for p in range(q + 1, n_spin_orbitals) if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz ] return singles, doubles ","def excitations(n_electrons, n_spin_orbitals, delta_sz=0): r""""""Generates single and double excitations from a Hartree-Fock reference state. Single and double excitations can be generated by acting with the excitation operators :math:`\hat T_1` and :math:`\hat T_2` on the Hartree-Fock (HF) reference state: .. math: && \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in \mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF} \rangle \\ && \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in \mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle, where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied, referred to as virtual (virt), *spin-orbitals* and :math:`\hat c` and :math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively. | .. figure:: ../../_static/qchem/sd_excitations.png :align: center :width: 70% | Args: n_electrons (int): Number of electrons. If an active space is defined, this is the number of active electrons. n_spin_orbitals (int): Number of spin-orbitals. If an active space is defined, 'n_spin_orbitals' is the number of active spin-orbitals. delta_sz (int): Specifies the selection rules ``sz[p] - sz[r] = delta_sz`` and ``sz[p] + sz[p] - sz[r] - sz[s] = delta_sz`` for the spin-projection ``sz`` of the orbitals involved in the single and double excitations, respectively. ``delta_sz`` can take the values :math:`0`, :math:`\pm 1` and :math:`\pm 2`. 
Returns: tuple(list, list): lists with the indices of the spin-orbitals involved in the single and double excitations **Example** >>> n_electrons = 2 >>> n_spin_orbitals = 4 >>> singles, doubles = excitations(n_electrons, n_spin_orbitals) >>> print(singles) [[0, 2], [1, 3]] >>> print(doubles) [[0, 1, 2, 3]] """""" if not n_electrons > 0: raise ValueError( ""The number of active electrons has to be greater than 0 \n"" ""Got n_electrons = {}"".format(n_electrons) ) if n_spin_orbitals <= n_electrons: raise ValueError( ""The number of active spin-orbitals ({}) "" ""has to be greater than the number of active electrons ({})."".format( n_spin_orbitals, n_electrons ) ) if delta_sz not in (0, 1, -1, 2, -2): raise ValueError( ""Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."".format(delta_sz) ) # define the single-particle state spin quantum number 'sz' sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_spin_orbitals)]) singles = [ [r, p] for r in range(n_electrons) for p in range(n_electrons, n_spin_orbitals) if sz[p] - sz[r] == delta_sz ] doubles = [ [s, r, q, p] for s in range(n_electrons - 1) for r in range(s + 1, n_electrons) for q in range(n_electrons, n_spin_orbitals - 1) for p in range(q + 1, n_spin_orbitals) if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz ] return singles, doubles " 3,"def get_stack_trace(*, skip=0): """""" Return a processed stack trace for the current call stack. If the ``ENABLE_STACKTRACES`` setting is False, return an empty :class:`list`. Otherwise return a :class:`list` of processed stack frame tuples (file name, line number, function name, source line, frame locals) for the current call stack. The first entry in the list will be for the bottom of the stack and the last entry will be for the top of the stack. ``skip`` is an :class:`int` indicating the number of stack frames above the frame for this function to omit from the stack trace. The default value of ``0`` means that the entry for the caller of this function will be the last entry in the returned stack trace. """""" config = dt_settings.get_config() if not config[""ENABLE_STACKTRACES""]: return [] stack_trace_recorder = getattr(_local_data, ""stack_trace_recorder"", None) if stack_trace_recorder is None: stack_trace_recorder = _StackTraceRecorder() _local_data.stack_trace_recorder = stack_trace_recorder return stack_trace_recorder.get_stack_trace( excluded_modules=config[""HIDE_IN_STACKTRACES""], include_locals=config[""ENABLE_STACKTRACES_LOCALS""], skip=skip + 1, ) ","def get_stack_trace(*, skip=0): """""" Return a processed stack trace for the current call stack. If the ``ENABLE_STACKTRACES`` setting is False, return an empty :class:`list`. Otherwise return a :class:`list` of processed stack frame tuples (file name, line number, function name, source line, frame locals) for the current call stack. The first entry in the list will be for the bottom of the stack and the last entry will be for the top of the stack. ``skip`` is an :class:`int` indicating the number of stack frames above the frame for this function to omit from the stack trace. The default value of ``0`` means that the entry for the caller of this function will be the last entry in the returned stack trace. 
"""""" config = dt_settings.get_config() if not config[""ENABLE_STACKTRACES""]: return [] stack_trace_recorder = getattr(_local_data, ""stack_trace_recorder"", None) if stack_trace_recorder is None: stack_trace_recorder = _StackTraceRecorder() _local_data.stack_trace_recorder = stack_trace_recorder return stack_trace_recorder.get_stack_trace( excluded_modules=config[""HIDE_IN_STACKTRACES""], include_locals=config[""ENABLE_STACKTRACES_LOCALS""], skip=skip + 1, # Skip the frame for this function. ) " 49956,"def autocontrast(image, cutoff=0, ignore=None): """""" Maximize (normalize) image contrast. This function calculates a histogram of the input image, removes **cutoff** percent of the lightest and darkest pixels from the histogram, and remaps the image so that the darkest pixel becomes black (0), and the lightest becomes white (255). :param image: The image to process. :param cutoff: How many percent to cut off from the histogram. :param ignore: The background pixel value (use None for no background). :return: An image. """""" histogram = image.histogram() lut = [] for layer in range(0, len(histogram), 256): h = histogram[layer : layer + 256] if ignore is not None: # get rid of outliers try: h[ignore] = 0 except TypeError: # assume sequence for ix in ignore: h[ix] = 0 if cutoff: # cut off pixels from both ends of the histogram if isinstance(cutoff, int): cutoff = (cutoff, cutoff) elif isinstance(cutoff, tuple): pass else: raise ValueError(""the cutoff can only be a integer or tuple"") # get number of pixels n = 0 for ix in range(256): n = n + h[ix] # remove cutoff% pixels from the low end cut = n * cutoff[0] // 100 for lo in range(256): if cut > h[lo]: cut = cut - h[lo] h[lo] = 0 else: h[lo] -= cut cut = 0 if cut <= 0: break # remove cutoff% samples from the hi end cut = n * cutoff[1] // 100 for hi in range(255, -1, -1): if cut > h[hi]: cut = cut - h[hi] h[hi] = 0 else: h[hi] -= cut cut = 0 if cut <= 0: break # find lowest/highest samples after preprocessing for lo in range(256): if h[lo]: break for hi in range(255, -1, -1): if h[hi]: break if hi <= lo: # don't bother lut.extend(list(range(256))) else: scale = 255.0 / (hi - lo) offset = -lo * scale for ix in range(256): ix = int(ix * scale + offset) if ix < 0: ix = 0 elif ix > 255: ix = 255 lut.append(ix) return _lut(image, lut) ","def autocontrast(image, cutoff=0, ignore=None): """""" Maximize (normalize) image contrast. This function calculates a histogram of the input image, removes **cutoff** percent of the lightest and darkest pixels from the histogram, and remaps the image so that the darkest pixel becomes black (0), and the lightest becomes white (255). :param image: The image to process. :param cutoff: How many percent to cut off from the histogram. :param ignore: The background pixel value (use None for no background). :return: An image. 
"""""" histogram = image.histogram() lut = [] for layer in range(0, len(histogram), 256): h = histogram[layer : layer + 256] if ignore is not None: # get rid of outliers try: h[ignore] = 0 except TypeError: # assume sequence for ix in ignore: h[ix] = 0 if cutoff: # cut off pixels from both ends of the histogram if not isinstance(cutoff, tuple): cutoff = (cutoff, cutoff) # get number of pixels n = 0 for ix in range(256): n = n + h[ix] # remove cutoff% pixels from the low end cut = n * cutoff[0] // 100 for lo in range(256): if cut > h[lo]: cut = cut - h[lo] h[lo] = 0 else: h[lo] -= cut cut = 0 if cut <= 0: break # remove cutoff% samples from the hi end cut = n * cutoff[1] // 100 for hi in range(255, -1, -1): if cut > h[hi]: cut = cut - h[hi] h[hi] = 0 else: h[hi] -= cut cut = 0 if cut <= 0: break # find lowest/highest samples after preprocessing for lo in range(256): if h[lo]: break for hi in range(255, -1, -1): if h[hi]: break if hi <= lo: # don't bother lut.extend(list(range(256))) else: scale = 255.0 / (hi - lo) offset = -lo * scale for ix in range(256): ix = int(ix * scale + offset) if ix < 0: ix = 0 elif ix > 255: ix = 255 lut.append(ix) return _lut(image, lut) " 39554,"def get_item(json_object: dict, *attributes): """""" Return `item` by going through all the `attributes` present in the `json_object` """""" if not json_object: LOGGER.error(f""json_object is empty: {json_object}"") return None item = json_object for attribute in attributes: if not attribute in item: LOGGER.error(f""Missing attribute {attribute} in {item}"") return None item = item[attribute] return item ","def get_item(json_object: dict, *attributes): """""" Return `item` by going through all the `attributes` present in the `json_object` """""" if not json_object: LOGGER.error(f""json_object is empty: {json_object}"") return None item = json_object for attribute in attributes: if attribute not in item: LOGGER.error(f""Missing attribute {attribute} in {item}"") return None item = item[attribute] return item " 4115,"def p_c_base_type(s, nonempty = 0, templates = None): if s.sy == '(': return p_c_complex_base_type(s, templates = templates) else: return p_c_simple_base_type(s, nonempty = nonempty, templates = templates) ","def p_c_base_type(s, nonempty=False, templates=None): if s.sy == '(': return p_c_complex_base_type(s, templates = templates) else: return p_c_simple_base_type(s, nonempty = nonempty, templates = templates) " 31057,"def viper_download(client, args): file_hash = args.get('file_hash') if len(file_hash) == 64: sample_info = client.sample_information(file_hash) sample = sample_download(file_hash) if sample.status_code == 200: filename = sample_info['data']['name'] viper_id = sample_info['data']['id'] mime = sample_info['data']['mime'] file_type = sample_info['data']['type'] size = sample_info['data']['size'] table_object = [{""File Name"": filename, ""File Hash"": file_hash, ""ViperID"": viper_id, ""MIME"": mime, ""File Type"": file_type, ""Size"": size}] context_object = {'Viper': {""Name"": filename, ""SHA256"": file_hash, ""ViperID"": viper_id, ""MIME"": mime, ""Type"": file_type, ""Size"": size}} demisto.results({'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': table_object, ""EntryContext"": context_object}) demisto.results(fileResult(filename, sample.content)) else: return_error('No valid sample found') else: return_error('Hash length is invalid.') ","def viper_download(client, args): file_hash = args.get('file_hash') if len(file_hash) == 64: sample_info = 
client.sample_information(file_hash) sample = client.sample_download(file_hash) if sample.status_code == 200: filename = sample_info['data']['name'] viper_id = sample_info['data']['id'] mime = sample_info['data']['mime'] file_type = sample_info['data']['type'] size = sample_info['data']['size'] table_object = [{""File Name"": filename, ""File Hash"": file_hash, ""ViperID"": viper_id, ""MIME"": mime, ""File Type"": file_type, ""Size"": size}] context_object = {'Viper': {""Name"": filename, ""SHA256"": file_hash, ""ViperID"": viper_id, ""MIME"": mime, ""Type"": file_type, ""Size"": size}} demisto.results({'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': table_object, ""EntryContext"": context_object}) demisto.results(fileResult(filename, sample.content)) else: return_error('No valid sample found') else: return_error('Hash length is invalid.') " 41949,"def test_suggest_with_step_multivariate_parzen_estimator() -> None: parameters = _ParzenEstimatorParameters( consider_prior=False, prior_weight=0.0, consider_magic_clip=False, consider_endpoints=False, weights=lambda x: np.arange(x) + 1.0, ) # Define search space for distribution with step argument and true ranges search_space = { ""c"": distributions.DiscreteUniformDistribution(low=1.0, high=7.0, q=3.0), ""d"": distributions.IntUniformDistribution(1, 5, step=2), } multivariate_samples = {""c"": np.array([4]), ""d"": np.array([1])} valid_ranges = {""c"": set(np.arange(1.0, 10.0, 3.0)), ""d"": set(np.arange(1, 7, 2))} with patch(_PRECOMPUTE_SIGMAS0, return_value=np.ones(2)): mpe = _MultivariateParzenEstimator(multivariate_samples, search_space, parameters) # Draw 10 samples, and check if all valid values are sampled. output_multivariate_samples = mpe.sample(np.random.RandomState(0), 10) for param_name in output_multivariate_samples: assert set(output_multivariate_samples[param_name]) == valid_ranges[param_name] ","def test_suggest_with_step_multivariate_parzen_estimator() -> None: parameters = _ParzenEstimatorParameters( consider_prior=False, prior_weight=0.0, consider_magic_clip=False, consider_endpoints=False, weights=lambda x: np.arange(x) + 1.0, ) # Define search space for distribution with step argument and true ranges search_space = { ""c"": distributions.DiscreteUniformDistribution(low=1.0, high=7.0, q=3.0), ""d"": distributions.IntUniformDistribution(low=1, high=5, step=2), } multivariate_samples = {""c"": np.array([4]), ""d"": np.array([1])} valid_ranges = {""c"": set(np.arange(1.0, 10.0, 3.0)), ""d"": set(np.arange(1, 7, 2))} with patch(_PRECOMPUTE_SIGMAS0, return_value=np.ones(2)): mpe = _MultivariateParzenEstimator(multivariate_samples, search_space, parameters) # Draw 10 samples, and check if all valid values are sampled. output_multivariate_samples = mpe.sample(np.random.RandomState(0), 10) for param_name in output_multivariate_samples: assert set(output_multivariate_samples[param_name]) == valid_ranges[param_name] " 8570,"def do_xmlrpc_rw(cobbler_api: CobblerAPI, port): """""" This trys to bring up the Cobbler xmlrpc_api and restart it if it fails. :param cobbler_api: The cobbler_api instance which is used for this method. :param port: The port where the xmlrpc api should run on. 
"""""" xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface) server = remote.CobblerXMLRPCServer(('127.0.0.1', port)) server.logRequests = 0 # don't print stuff logger.debug(""XMLRPC running on %s"", port) server.register_instance(xinterface) start_time = """" try: import psutil p = psutil.Process(os.getpid()) start_time = "" in %s seconds"" % str(time.time() - p.create_time()) except ModuleNotFoundError: # This is not critical, but debug only - just install python3-psutil pass while True: try: logger.info(""Cobbler startup completed"" + start_time) server.serve_forever() except IOError: # interrupted? try to serve again time.sleep(0.5) ","def do_xmlrpc_rw(cobbler_api: CobblerAPI, port): """""" This trys to bring up the Cobbler xmlrpc_api and restart it if it fails. :param cobbler_api: The cobbler_api instance which is used for this method. :param port: The port where the xmlrpc api should run on. """""" xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface) server = remote.CobblerXMLRPCServer(('127.0.0.1', port)) server.logRequests = 0 # don't print stuff logger.debug(""XMLRPC running on %s"", port) server.register_instance(xinterface) start_time = """" try: import psutil p = psutil.Process(os.getpid()) start_time = "" in %s seconds"" % str(time.time() - p.create_time()) except ModuleNotFoundError: # This is not critical, but debug only - just install python3-psutil pass while True: try: logger.info(""Cobbler startup completed %s"", start_time) server.serve_forever() except IOError: # interrupted? try to serve again time.sleep(0.5) " 45346,"def _get_num_actors(num_actors): """""" Get number of actors to create. In case `num_actors` is None, integer number of actors will be computed by condition 2 CPUs per 1 actor. Parameters ---------- num_actors : int or None Desired number of actors. Returns ------- int Number of actors to create. """""" min_cpus_per_node = _get_min_cpus_per_node() if num_actors is None: num_actors_per_node = max(1, int(min_cpus_per_node // 2)) return num_actors_per_node * len(ray.nodes()) elif isinstance(num_actors, int): assert ( num_actors % len(ray.nodes()) == 0 ), ""`num_actors` must be a multiple to number of nodes in Ray cluster."" return num_actors else: RuntimeError(""`num_actors` must be int or None"") ","def _get_num_actors(num_actors): """""" Get number of actors to create. In case `num_actors` is None, integer number of actors will be computed by condition 2 CPUs per 1 actor. Parameters ---------- num_actors : int, default: None Desired number of actors. Returns ------- int Number of actors to create. 
"""""" min_cpus_per_node = _get_min_cpus_per_node() if num_actors is None: num_actors_per_node = max(1, int(min_cpus_per_node // 2)) return num_actors_per_node * len(ray.nodes()) elif isinstance(num_actors, int): assert ( num_actors % len(ray.nodes()) == 0 ), ""`num_actors` must be a multiple to number of nodes in Ray cluster."" return num_actors else: RuntimeError(""`num_actors` must be int or None"") " 31403,"def get_violation_list_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" :type client: Client :param client: Gamma client :param args: all command arguments, usually passed from demisto.args() args['name'] is used as input name :return: A CommandResults object that is then passed to return_results :rtype: ``CommandResults`` """""" minimum_violation = args.get(""minimum_violation"", 1) limit = args.get(""limit"", 10) if not int(minimum_violation) >= 1: raise ValueError(""minimum_violation must be greater than 0"") if not int(limit) >= 1 or not int(limit) <= 100: raise ValueError(""limit must be between 1 and 100"") v_list = client.get_violation_list(minimum_violation, limit) note = '' if v_list['response'][0]['violation_id'] != int(minimum_violation): note += f'Violation with the minimum_violation ID does not exist. Showing violations pulled from the next available ID: {v_list[""response""][0][""violation_id""]} \r' human_readable = note for i in v_list['response']: violation_id = i['violation_id'] human_readable += f'### Violation {i[""violation_id""]} \r' \ f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \ f'|---|---|---|---|---|---| \r' \ f'| {violation_id} | {i[""violation_status""]} | {timestamp_to_datestring(i[""violation_event_timestamp""]*1000)} | {i[""dashboard_url""]} | {i[""user""]} | {i[""app_name""]} | \r' return CommandResults( readable_output=human_readable, outputs_prefix=""GammaViolation"", outputs_key_field=""violation_id"", outputs=v_list, raw_response=v_list ) ","def get_violation_list_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" :type client: Client :param client: Gamma client :param args: all command arguments, usually passed from demisto.args() args['name'] is used as input name :return: A CommandResults object that is then passed to return_results :rtype: ``CommandResults`` """""" minimum_violation = args.get(""minimum_violation"", 1) limit = args.get(""limit"", 10) if int(minimum_violation) < 1: raise ValueError(""minimum_violation must be greater than 0"") if not int(limit) >= 1 or not int(limit) <= 100: raise ValueError(""limit must be between 1 and 100"") v_list = client.get_violation_list(minimum_violation, limit) note = '' if v_list['response'][0]['violation_id'] != int(minimum_violation): note += f'Violation with the minimum_violation ID does not exist. 
Showing violations pulled from the next available ID: {v_list[""response""][0][""violation_id""]} \r' human_readable = note for i in v_list['response']: violation_id = i['violation_id'] human_readable += f'### Violation {i[""violation_id""]} \r' \ f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \ f'|---|---|---|---|---|---| \r' \ f'| {violation_id} | {i[""violation_status""]} | {timestamp_to_datestring(i[""violation_event_timestamp""]*1000)} | {i[""dashboard_url""]} | {i[""user""]} | {i[""app_name""]} | \r' return CommandResults( readable_output=human_readable, outputs_prefix=""GammaViolation"", outputs_key_field=""violation_id"", outputs=v_list, raw_response=v_list ) " 50585,"def create_rc_file(name, filename, libraries): archtitecture = ""amd64"" if sys.maxsize > 2**32 else ""x86"" dependencies = [] for lib in libraries: dependencies.append( ' """"\n' ' """"\n' ' """"\n' ' """"\n' ' """"\n' % (lib, archtitecture)) rc_body = ( '#include \n' '\n' 'LANGUAGE 0x00, 0x00\n' '\n' 'ISOLATIONAWARE_MANIFEST_RESOURCE_ID RT_MANIFEST\n' 'BEGIN\n' ' """"\n' ' """"\n' ' """"\n' ' """"\n' '%s' ' """"\n' 'END\n' % (name, archtitecture, filename, """".join(dependencies))) with open(name + "".rc"", ""w"", encoding='ascii') as f: f.write(rc_body) ","def create_rc_file(name, filename, libraries): archtitecture = ""amd64"" if sys.maxsize > 2**32 else ""x86"" dependencies = [] for lib in libraries: dependencies.append( ' """"\n' ' """"\n' ' """"\n' ' """"\n' ' """"\n' % (lib, archtitecture)) rc_body = textwrap.dedent(''' #include LANGUAGE 0x00, 0x00 ISOLATIONAWARE_MANIFEST_RESOURCE_ID RT_MANIFEST BEGIN """" """" """" """" %s """" END''') % (name, archtitecture, filename, """".join(dependencies)) with open(name + "".rc"", ""w"", encoding='ascii') as f: f.write(rc_body) " 17557,"def artifact_status_put_req(artifact_id, user_id, visibility): """"""Set the status of the artifact given Parameters ---------- artifact_id : int Artifact being acted on user_id : str The user requesting the action visibility : {'sandbox', 'awaiting_approval', 'private', 'public'} What to change the visibility to Returns ------- dict Status of action, in the form {'status': status, 'message': msg} status: status of the action, either success or error message: Human readable message for status """""" if visibility not in get_visibilities(): return {'status': 'error', 'message': 'Unknown visibility value: %s' % visibility} pd = Artifact(int(artifact_id)) sid = pd.study.id access_error = check_access(sid, user_id) if access_error: return access_error user = User(str(user_id)) status = 'success' msg = 'Artifact visibility changed to %s' % visibility # Set the approval to private if needs approval and admin if visibility == 'private': if not qiita_config.require_approval: pd.visibility = 'private' # Set the approval to private if approval not required elif user.level == 'admin': pd.visibility = 'private' # Trying to set approval without admin privileges else: status = 'error' msg = 'User does not have permissions to approve change' else: pd.visibility = visibility LogEntry.create('Warning', '%s changed artifact %s (%d) to %s' % ( user_id, artifact_id, sid, visibility)) return {'status': status, 'message': msg} ","def artifact_status_put_req(artifact_id, user_id, visibility): """"""Set the status of the artifact given Parameters ---------- artifact_id : int Artifact being acted on user_id : str The user requesting the action visibility : {'sandbox', 'awaiting_approval', 'private', 'public'} What to change the visibility to 
Returns ------- dict Status of action, in the form {'status': status, 'message': msg} status: status of the action, either success or error message: Human readable message for status """""" if visibility not in get_visibilities(): return {'status': 'error', 'message': 'Unknown visibility value: %s' % visibility} pd = Artifact(int(artifact_id)) sid = pd.study.id access_error = check_access(sid, user_id) if access_error: return access_error user = User(str(user_id)) status = 'success' msg = 'Artifact visibility changed to %s' % visibility # Set the approval to private if needs approval and admin if visibility == 'private': if not qiita_config.require_approval: pd.visibility = 'private' # Set the approval to private if approval not required elif user.level == 'admin': pd.visibility = 'private' # Trying to set approval without admin privileges else: status = 'error' msg = 'User does not have permissions to approve change' else: pd.visibility = visibility LogEntry.create('Warning', '%s changed artifact %s (study %d) to %s' % ( user_id, artifact_id, sid, visibility)) return {'status': status, 'message': msg} " 41084,"def inference( output_dir: str, maxlenratio: float, minlenratio: float, batch_size: int, dtype: str, beam_size: int, ngpu: int, seed: int, ctc_weight: float, lm_weight: float, penalty: float, nbest: int, num_workers: int, log_level: Union[int, str], data_path_and_name_and_type: Sequence[Tuple[str, str, str]], key_file: Optional[str], asr_train_config: str, asr_model_file: str, lm_train_config: Optional[str], lm_file: Optional[str], word_lm_train_config: Optional[str], word_lm_file: Optional[str], token_type: Optional[str], bpemodel: Optional[str], allow_variable_data_keys: bool, ): assert check_argument_types() if batch_size > 1: raise NotImplementedError(""batch decoding is not implemented"") if word_lm_train_config is not None: raise NotImplementedError(""Word LM is not implemented"") if ngpu > 1: raise NotImplementedError(""only single GPU decoding is supported"") logging.basicConfig( level=log_level, format=""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"", ) if ngpu >= 1: device = ""cuda"" else: device = ""cpu"" # 1. Set random-seed set_all_random_seed(seed) # 2. Build speech2text speech2text = Speech2Text( asr_train_config=asr_train_config, asr_model_file=asr_model_file, lm_train_config=lm_train_config, lm_file=lm_file, token_type=token_type, bpemodel=bpemodel, device=device, maxlenratio=maxlenratio, minlenratio=minlenratio, dtype=dtype, beam_size=beam_size, ctc_weight=ctc_weight, lm_weight=lm_weight, penalty=penalty, nbest=nbest, ) # 3. 
Build data-iterator loader = ASRTask.build_streaming_iterator( data_path_and_name_and_type, dtype=dtype, batch_size=batch_size, key_file=key_file, num_workers=num_workers, preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False), collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False), allow_variable_data_keys=allow_variable_data_keys, inference=True, ) # 7 .Start for-loop # FIXME(kamo): The output format should be discussed about with DatadirWriter(output_dir) as writer: for keys, batch in loader: assert isinstance(batch, dict), type(batch) assert all(isinstance(s, str) for s in keys), keys _bs = len(next(iter(batch.values()))) assert len(keys) == _bs, f""{len(keys)} != {_bs}"" batch = {k: v[0] for k, v in batch.items() if not k.endswith(""_lengths"")} # N-best list of (text, token, token_int, hyp_object) try: results = speech2text(**batch) except TooShortUttError: logging.info( ""Utterance is too short for subsampling, return empty results"" ) results = [["" "", [""""], [2], ""nan""]] * nbest # Only supporting batch_size==1 key = keys[0] for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results): # Create a directory: outdir/{n}best_recog ibest_writer = writer[f""{n}best_recog""] # Write the result to each file ibest_writer[""token""][key] = "" "".join(token) ibest_writer[""token_int""][key] = "" "".join(map(str, token_int)) ibest_writer[""score""][key] = str(hyp.score) if text is not None: ibest_writer[""text""][key] = text ","def inference( output_dir: str, maxlenratio: float, minlenratio: float, batch_size: int, dtype: str, beam_size: int, ngpu: int, seed: int, ctc_weight: float, lm_weight: float, penalty: float, nbest: int, num_workers: int, log_level: Union[int, str], data_path_and_name_and_type: Sequence[Tuple[str, str, str]], key_file: Optional[str], asr_train_config: str, asr_model_file: str, lm_train_config: Optional[str], lm_file: Optional[str], word_lm_train_config: Optional[str], word_lm_file: Optional[str], token_type: Optional[str], bpemodel: Optional[str], allow_variable_data_keys: bool, ): assert check_argument_types() if batch_size > 1: raise NotImplementedError(""batch decoding is not implemented"") if word_lm_train_config is not None: raise NotImplementedError(""Word LM is not implemented"") if ngpu > 1: raise NotImplementedError(""only single GPU decoding is supported"") logging.basicConfig( level=log_level, format=""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"", ) if ngpu >= 1: device = ""cuda"" else: device = ""cpu"" # 1. Set random-seed set_all_random_seed(seed) # 2. Build speech2text speech2text = Speech2Text( asr_train_config=asr_train_config, asr_model_file=asr_model_file, lm_train_config=lm_train_config, lm_file=lm_file, token_type=token_type, bpemodel=bpemodel, device=device, maxlenratio=maxlenratio, minlenratio=minlenratio, dtype=dtype, beam_size=beam_size, ctc_weight=ctc_weight, lm_weight=lm_weight, penalty=penalty, nbest=nbest, ) # 3. 
Build data-iterator loader = ASRTask.build_streaming_iterator( data_path_and_name_and_type, dtype=dtype, batch_size=batch_size, key_file=key_file, num_workers=num_workers, preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False), collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False), allow_variable_data_keys=allow_variable_data_keys, inference=True, ) # 7 .Start for-loop # FIXME(kamo): The output format should be discussed about with DatadirWriter(output_dir) as writer: for keys, batch in loader: assert isinstance(batch, dict), type(batch) assert all(isinstance(s, str) for s in keys), keys _bs = len(next(iter(batch.values()))) assert len(keys) == _bs, f""{len(keys)} != {_bs}"" batch = {k: v[0] for k, v in batch.items() if not k.endswith(""_lengths"")} # N-best list of (text, token, token_int, hyp_object) try: results = speech2text(**batch) except TooShortUttError: logging.warning( ""Utterance is too short for subsampling, return empty results"" ) results = [["" "", [""""], [2], ""nan""]] * nbest # Only supporting batch_size==1 key = keys[0] for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results): # Create a directory: outdir/{n}best_recog ibest_writer = writer[f""{n}best_recog""] # Write the result to each file ibest_writer[""token""][key] = "" "".join(token) ibest_writer[""token_int""][key] = "" "".join(map(str, token_int)) ibest_writer[""score""][key] = str(hyp.score) if text is not None: ibest_writer[""text""][key] = text " 25573,"def main() -> None: parser = argparse.ArgumentParser() parser.add_argument(""config"") args = parser.parse_args() with open(args.config, ""r"") as handler: config = json.load(handler) # validate the endpoints node_to_endpoint = dict() node_to_address = dict() for node_name, node_info in config[""nodes""].items(): if urlsplit(node_info[""endpoint""]).scheme == """": raise ValueError(""'endpoint' must have the protocol defined"") url_deposit = f""{node_info['endpoint']}/api/{API_VERSION}/address"" result = requests.get(url_deposit).json() if result[""our_address""] != node_info[""address""]: raise ValueError( f""Address mismatch, configuration {node_info['address']}, "" f""API response {result['our_address']}"" ) node_to_endpoint[node_name] = node_info[""endpoint""] node_to_address[node_name] = node_info[""address""] queue_per_node: Dict[str, List[ChannelNew]] = defaultdict(list) target_to_depositqueue: Dict[Tuple[str, str], JoinableQueue] = dict() # Schedule the requests to evenly distribute the load. This is important # because as of 0.100.5 channel can not be done concurrently, by dividing # the load evenly we make sure the channels are open as fast as possible. 
for token_address, channels_to_open in config[""networks""].items(): for channel in channels_to_open: node1 = channel[""node1""] node2 = channel[""node2""] participant1 = node_to_address[node1] participant2 = node_to_address[node2] is_node1_with_less_work = len(queue_per_node[participant1]) < len( queue_per_node[participant2] ) if is_node1_with_less_work: channel_new = ChannelNew( token_address=token_address, participant1=participant1, participant2=participant2, endpoint1=node_to_endpoint[node1], endpoint2=node_to_endpoint[node2], minimum_capacity1=channel[""minimum_capacity1""], minimum_capacity2=channel[""minimum_capacity2""], ) queue_per_node[participant1].append(channel_new) else: channel_new = ChannelNew( token_address=token_address, participant1=participant2, participant2=participant1, endpoint1=node_to_endpoint[node2], endpoint2=node_to_endpoint[node1], minimum_capacity1=channel[""minimum_capacity2""], minimum_capacity2=channel[""minimum_capacity1""], ) queue_per_node[participant2].append(channel_new) # queue used to order deposits target = (token_address, channel_new.participant2) if target not in target_to_depositqueue: target_to_depositqueue[target] = JoinableQueue() open_greenlets = set( gevent.spawn(channel_open_with_the_same_node, channels_to_open, target_to_depositqueue) for channels_to_open in queue_per_node.values() ) deposit_greenlets = [ gevent.spawn(channel_deposit_with_the_same_node_and_token_network, deposit_queue) for deposit_queue in target_to_depositqueue.values() ] gevent.joinall(open_greenlets, raise_error=True) log.info(""Opening the channels finished"") # Because all channels have been opened, there is no more deposits to do, # so now one just has to wait for the queues to get empty. for queue in target_to_depositqueue.values(): # Queue` and `JoinableQueue` don't have the method `rawlink`, so # `joinall` cannot be used. At the same time calling `join` in the # `JoinableQueue` was raising an exception `This operation would block # forever` which seems to be a false positive. Using `empty` to # circumvent it. while not queue.empty(): gevent.sleep(1) log.info(""Depositing to the channels finished"") # The deposit greenlets are infinite loops. gevent.killall(deposit_greenlets) ","def main() -> None: parser = argparse.ArgumentParser() parser.add_argument(""config"") args = parser.parse_args() with open(args.config, ""r"") as handler: config = json.load(handler) # validate the endpoints node_to_endpoint = dict() node_to_address = dict() for node_name, node_info in config[""nodes""].items(): if urlsplit(node_info[""endpoint""]).scheme == """": raise ValueError(""'endpoint' must have the protocol defined"") url_deposit = f""{node_info['endpoint']}/api/{API_VERSION}/address"" result = requests.get(url_deposit).json() if result[""our_address""] != node_info[""address""]: raise ValueError( f""Address mismatch, configuration {node_info['address']}, "" f""API response {result['our_address']}"" ) node_to_endpoint[node_name] = node_info[""endpoint""] node_to_address[node_name] = node_info[""address""] queue_per_node: Dict[str, List[ChannelNew]] = defaultdict(list) target_to_depositqueue: Dict[Tuple[str, str], JoinableQueue] = dict() # Schedule the requests to evenly distribute the load. This is important # because as of 0.100.5 channel can not be done concurrently, by dividing # the load evenly we make sure the channels are opened as fast as possible. 
for token_address, channels_to_open in config[""networks""].items(): for channel in channels_to_open: node1 = channel[""node1""] node2 = channel[""node2""] participant1 = node_to_address[node1] participant2 = node_to_address[node2] is_node1_with_less_work = len(queue_per_node[participant1]) < len( queue_per_node[participant2] ) if is_node1_with_less_work: channel_new = ChannelNew( token_address=token_address, participant1=participant1, participant2=participant2, endpoint1=node_to_endpoint[node1], endpoint2=node_to_endpoint[node2], minimum_capacity1=channel[""minimum_capacity1""], minimum_capacity2=channel[""minimum_capacity2""], ) queue_per_node[participant1].append(channel_new) else: channel_new = ChannelNew( token_address=token_address, participant1=participant2, participant2=participant1, endpoint1=node_to_endpoint[node2], endpoint2=node_to_endpoint[node1], minimum_capacity1=channel[""minimum_capacity2""], minimum_capacity2=channel[""minimum_capacity1""], ) queue_per_node[participant2].append(channel_new) # queue used to order deposits target = (token_address, channel_new.participant2) if target not in target_to_depositqueue: target_to_depositqueue[target] = JoinableQueue() open_greenlets = set( gevent.spawn(channel_open_with_the_same_node, channels_to_open, target_to_depositqueue) for channels_to_open in queue_per_node.values() ) deposit_greenlets = [ gevent.spawn(channel_deposit_with_the_same_node_and_token_network, deposit_queue) for deposit_queue in target_to_depositqueue.values() ] gevent.joinall(open_greenlets, raise_error=True) log.info(""Opening the channels finished"") # Because all channels have been opened, there is no more deposits to do, # so now one just has to wait for the queues to get empty. for queue in target_to_depositqueue.values(): # Queue` and `JoinableQueue` don't have the method `rawlink`, so # `joinall` cannot be used. At the same time calling `join` in the # `JoinableQueue` was raising an exception `This operation would block # forever` which seems to be a false positive. Using `empty` to # circumvent it. while not queue.empty(): gevent.sleep(1) log.info(""Depositing to the channels finished"") # The deposit greenlets are infinite loops. 
gevent.killall(deposit_greenlets) " 6457,"def prepare_chart_data(item_data): labels, qty_to_order, ordered_qty, received_qty, pending_qty = [], [], [], [], [] if len(item_data) > 30: item_data = dict(list(item_data.items())[:30]) for row in item_data: mr_row = item_data[row] labels.append(row) qty_to_order.append(mr_row[""qty_to_order""]) ordered_qty.append(mr_row[""ordered_qty""]) received_qty.append(mr_row[""received_qty""]) pending_qty.append(mr_row[""pending_qty""]) chart_data = { ""data"" : { ""labels"": labels, ""datasets"": [ { 'name': _('Qty to Order'), 'values': qty_to_order }, { 'name': _('Ordered Qty'), 'values': ordered_qty }, { 'name': _('Received Qty'), 'values': received_qty }, { 'name': _('Pending Qty'), 'values': pending_qty } ] }, ""type"": ""bar"", ""barOptions"": { ""stacked"": 1 }, } return chart_data ","def prepare_chart_data(item_data): labels, qty_to_order, ordered_qty, received_qty, pending_qty = [], [], [], [], [] if len(item_data) > 30: item_data = dict(list(item_data.items())[:30]) for row in item_data: mr_row = item_data[row] labels.append(row) qty_to_order.append(mr_row[""qty_to_order""]) ordered_qty.append(mr_row[""ordered_qty""]) received_qty.append(mr_row[""received_qty""]) pending_qty.append(mr_row[""pending_qty""]) chart_data = { ""data"" : { ""labels"": labels, ""datasets"": [ { 'name': _('Qty to Order'), 'values': qty_to_order }, { 'name': _('Ordered Qty'), 'values': ordered_qty }, { 'name': _('Received Qty'), 'values': received_qty }, { 'name': _('Pending Qty'), 'values': qty_to_receive } ] }, ""type"": ""bar"", ""barOptions"": { ""stacked"": 1 }, } return chart_data " 27385,"def produce_grid( tuple_of_limits: Sequence[float], grid_spacing: float ) -> np.ndarray: """"""Produce a 2D grid for the simulation system. The grid is based on the tuple of Cartesian Coordinate limits calculated in an earlier step. Parameters ---------- tuple_of_limits : tuple ``x_min, x_max, y_min, y_max`` grid_spacing : float grid size in all directions in ångström Returns ------- grid : array ``numpy.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing]`` """""" x_min, x_max, y_min, y_max = tuple_of_limits grid = np.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing] return grid ","def produce_grid( tuple_of_limits: Tuple[int, int, int, int], grid_spacing: float ) -> np.ndarray: """"""Produce a 2D grid for the simulation system. The grid is based on the tuple of Cartesian Coordinate limits calculated in an earlier step. Parameters ---------- tuple_of_limits : tuple ``x_min, x_max, y_min, y_max`` grid_spacing : float grid size in all directions in ångström Returns ------- grid : array ``numpy.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing]`` """""" x_min, x_max, y_min, y_max = tuple_of_limits grid = np.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing] return grid " 58751,"def to_tuple_type(ty, exprs): """"""Pack the sequence of expressions into the nested tuple type. Parameters ---------- ty: tvm.Type The type to pack with. exprs: The expressions to pack back into the nested tuple type. Returns ------- result: List[tvm.relay.Expr] The packed tuple expression. """""" return _make.ToTupleType(ty, exprs) ","def to_tuple_type(ty, exprs): """"""Pack the sequence of expressions into the nested tuple type. Parameters ---------- ty: tvm.Type The type to pack with. exprs: tvm.relay.Expr The expressions to pack back into the nested tuple type. Returns ------- result: List[tvm.relay.Expr] The packed tuple expression. 
"""""" return _make.ToTupleType(ty, exprs) " 36984,"def val_to_json( run: ""Optional[LocalRun]"", key: str, val: ""ValToJsonType"", namespace: Optional[Union[str, int]] = None, ignore_copy_err: Optional[bool] = None, ) -> Union[Sequence, dict]: # Converts a wandb datatype to its JSON representation. if namespace is None: raise ValueError( ""val_to_json must be called with a namespace(a step number, or 'summary') argument"" ) converted = val typename = util.get_full_typename(val) if util.is_pandas_data_frame(val): val = wandb.Table(dataframe=val) elif util.is_matplotlib_typename(typename) or util.is_plotly_typename(typename): val = Plotly.make_plot_media(val) elif isinstance(val, Sequence) and all(isinstance(v, WBValue) for v in val): assert run # This check will break down if Image/Audio/... have child classes. if ( len(val) and isinstance(val[0], BatchableMedia) and all(isinstance(v, type(val[0])) for v in val) ): if TYPE_CHECKING: val = cast(Sequence[""BatchableMedia""], val) items = _prune_max_seq(val) if _server_accepts_image_filenames(): for item in items: item.bind_to_run( run=run, key=key, step=namespace, ignore_copy_err=ignore_copy_err, ) else: for i, item in enumerate(items): item.bind_to_run( run=run, key=key, step=namespace, id_=i, ignore_copy_err=ignore_copy_err, ) if run._attach_id and run._init_pid != os.getpid(): wandb.termwarn( f""Trying to log a sequence of {items[0].__class__.__name__}(s) from multiple processes might cause for data loss. Please upgrade your wandb server"", repeat=False, ) return items[0].seq_to_json(items, run, key, namespace) else: # TODO(adrian): Good idea to pass on the same key here? Maybe include # the array index? # There is a bug here: if this array contains two arrays of the same type of # anonymous media objects, their eventual names will collide. # This used to happen. The frontend doesn't handle heterogenous arrays # raise ValueError( # ""Mixed media types in the same list aren't supported"") return [ val_to_json( run, key, v, namespace=namespace, ignore_copy_err=ignore_copy_err ) for v in val ] if isinstance(val, WBValue): assert run if isinstance(val, Media) and not val.is_bound(): if hasattr(val, ""_log_type"") and val._log_type in [ ""table"", ""partitioned-table"", ""joined-table"", ]: # Special conditional to log tables as artifact entries as well. # I suspect we will generalize this as we transition to storing all # files in an artifact # we sanitize the key to meet the constraints defined in wandb_artifacts.py # in this case, leaving only alpha numerics or underscores. sanitized_key = re.sub(r""[^a-zA-Z0-9_]+"", """", key) art = wandb.wandb_sdk.wandb_artifacts.Artifact( ""run-{}-{}"".format(run.id, sanitized_key), ""run_table"" ) art.add(val, key) run.log_artifact(art) # Partitioned tables and joined tables do not support being bound to runs. if not ( hasattr(val, ""_log_type"") and val._log_type in [""partitioned-table"", ""joined-table""] ): val.bind_to_run(run, key, namespace) return val.to_json(run) return converted # type: ignore ","def val_to_json( run: ""Optional[LocalRun]"", key: str, val: ""ValToJsonType"", namespace: Optional[Union[str, int]] = None, ignore_copy_err: Optional[bool] = None, ) -> Union[Sequence, dict]: # Converts a wandb datatype to its JSON representation. 
if namespace is None: raise ValueError( ""val_to_json must be called with a namespace(a step number, or 'summary') argument"" ) converted = val typename = util.get_full_typename(val) if util.is_pandas_data_frame(val): val = wandb.Table(dataframe=val) elif util.is_matplotlib_typename(typename) or util.is_plotly_typename(typename): val = Plotly.make_plot_media(val) elif isinstance(val, Sequence) and all(isinstance(v, WBValue) for v in val): assert run # This check will break down if Image/Audio/... have child classes. if ( len(val) and isinstance(val[0], BatchableMedia) and all(isinstance(v, type(val[0])) for v in val) ): if TYPE_CHECKING: val = cast(Sequence[""BatchableMedia""], val) items = _prune_max_seq(val) if _server_accepts_image_filenames(): for item in items: item.bind_to_run( run=run, key=key, step=namespace, ignore_copy_err=ignore_copy_err, ) else: for i, item in enumerate(items): item.bind_to_run( run=run, key=key, step=namespace, id_=i, ignore_copy_err=ignore_copy_err, ) if run._attach_id and run._init_pid != os.getpid(): wandb.termwarn( f""Attempting to log a sequence of {items[0].__class__.__name__} objects from multiple processes might result in data loss. Please upgrade your wandb client with `pip install wandb -U`"", repeat=False, ) return items[0].seq_to_json(items, run, key, namespace) else: # TODO(adrian): Good idea to pass on the same key here? Maybe include # the array index? # There is a bug here: if this array contains two arrays of the same type of # anonymous media objects, their eventual names will collide. # This used to happen. The frontend doesn't handle heterogenous arrays # raise ValueError( # ""Mixed media types in the same list aren't supported"") return [ val_to_json( run, key, v, namespace=namespace, ignore_copy_err=ignore_copy_err ) for v in val ] if isinstance(val, WBValue): assert run if isinstance(val, Media) and not val.is_bound(): if hasattr(val, ""_log_type"") and val._log_type in [ ""table"", ""partitioned-table"", ""joined-table"", ]: # Special conditional to log tables as artifact entries as well. # I suspect we will generalize this as we transition to storing all # files in an artifact # we sanitize the key to meet the constraints defined in wandb_artifacts.py # in this case, leaving only alpha numerics or underscores. sanitized_key = re.sub(r""[^a-zA-Z0-9_]+"", """", key) art = wandb.wandb_sdk.wandb_artifacts.Artifact( ""run-{}-{}"".format(run.id, sanitized_key), ""run_table"" ) art.add(val, key) run.log_artifact(art) # Partitioned tables and joined tables do not support being bound to runs. 
if not ( hasattr(val, ""_log_type"") and val._log_type in [""partitioned-table"", ""joined-table""] ): val.bind_to_run(run, key, namespace) return val.to_json(run) return converted # type: ignore " 30739,"def check_pack_and_request_review(pr_number, github_token=None, verify_ssl=True): modified_packs = get_pr_modified_packs(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) pr_author = get_pr_author(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) reviewers = set() for pack in modified_packs: pack_metadata_path = os.path.join(PACKS_FULL_PATH, pack, PACK_METADATA) if os.path.exists(pack_metadata_path): with open(pack_metadata_path, 'r') as pack_metadata_file: pack_metadata = json.load(pack_metadata_file) if pack_metadata.get('support') != XSOAR_SUPPORT and PACK_METADATA_GITHUB_USER_FIELD in pack_metadata \ and pack_metadata[PACK_METADATA_GITHUB_USER_FIELD]: pack_reviewers = pack_metadata[PACK_METADATA_GITHUB_USER_FIELD] pack_reviewers = pack_reviewers if isinstance(pack_reviewers, list) else pack_reviewers.split("","") github_users = [u.lower() for u in pack_reviewers] for github_user in github_users: user_exists = check_if_user_exists(github_user=github_user, github_token=github_token, verify_ssl=verify_ssl) if user_exists and github_user != pr_author: reviewers.add(github_user) print(f""Found {github_user} default reviewer of pack {pack}"") elif pack_metadata.get('support') == XSOAR_SUPPORT: print(f""Skipping check of {pack} pack supported by {XSOAR_SUPPORT}"") else: print(f""{pack} pack has no default github reviewer"") else: print(f""Not found {pack} {PACK_METADATA} file."") if reviewers: request_review_from_user(reviewers=reviewers, pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) else: print(""No reviewers were found."") ","def check_pack_and_request_review(pr_number, github_token=None, verify_ssl=True): modified_packs = get_pr_modified_packs(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) pr_author = get_pr_author(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) reviewers = set() for pack in modified_packs: pack_metadata_path = os.path.join(PACKS_FULL_PATH, pack, PACK_METADATA) if os.path.exists(pack_metadata_path): with open(pack_metadata_path, 'r') as pack_metadata_file: pack_metadata = json.load(pack_metadata_file) if pack_metadata.get('support') != XSOAR_SUPPORT and pack_metadata.get(PACK_METADATA_GITHUB_USER_FIELD): pack_reviewers = pack_metadata[PACK_METADATA_GITHUB_USER_FIELD] pack_reviewers = pack_reviewers if isinstance(pack_reviewers, list) else pack_reviewers.split("","") github_users = [u.lower() for u in pack_reviewers] for github_user in github_users: user_exists = check_if_user_exists(github_user=github_user, github_token=github_token, verify_ssl=verify_ssl) if user_exists and github_user != pr_author: reviewers.add(github_user) print(f""Found {github_user} default reviewer of pack {pack}"") elif pack_metadata.get('support') == XSOAR_SUPPORT: print(f""Skipping check of {pack} pack supported by {XSOAR_SUPPORT}"") else: print(f""{pack} pack has no default github reviewer"") else: print(f""Not found {pack} {PACK_METADATA} file."") if reviewers: request_review_from_user(reviewers=reviewers, pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) else: print(""No reviewers were found."") " 7186,"def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r""""""Measure properties of labeled image regions. 
Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions ` for more details. .. deprecated:: 0.16.0 Use ""rc"" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. **bbox_area** : int Number of pixels of bounding box. **centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. **image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. **local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. 
**major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. **weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. **weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. 
[4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.729879860483141, 81.912285234465827) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.729879860483141, 81.912285234465827) """""" if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): raise TypeError('Non-integer image types are ambiguous:' 'use ndimage.label to label the connected' 'components of the image,' 'or label_image.astype(np.uint8) to interpret' 'the True values as a single label') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates=""rc""` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than ""rc"" for the ""coordinates"" argument ' 'to skimage.measure.regionprops are no longer supported. ' 'You should update your code to use ""rc"" coordinates and ' 'stop using the ""coordinates"" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions ","def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r""""""Measure properties of labeled image regions. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions ` for more details. .. deprecated:: 0.16.0 Use ""rc"" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. 
**bbox_area** : int Number of pixels of bounding box. **centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. **image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. **local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. **major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. 
**weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. **weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. [4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.729879860483141, 81.912285234465827) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.729879860483141, 81.912285234465827) """""" if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): raise TypeError('Non-integer image types are ambiguous:' 'use ndimage.label to label the connected' 'components of the label_image, ' 'or label_image.astype(np.uint8) to interpret' 'the True values as a single label') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates=""rc""` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than ""rc"" for the ""coordinates"" argument ' 'to skimage.measure.regionprops are no longer supported. 
' 'You should update your code to use ""rc"" coordinates and ' 'stop using the ""coordinates"" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions " 31283,"def main(): if demisto.command() == 'splunk-parse-raw': splunk_parse_raw_command() sys.exit(0) service = None proxy = demisto.params().get('proxy') use_requests_handler = demisto.params().get('use_requests_handler') connection_args = { 'host': demisto.params()['host'], 'port': demisto.params()['port'], 'app': demisto.params().get('app', '-'), 'username': demisto.params()['authentication']['identifier'], 'password': demisto.params()['authentication']['password'], 'verify': VERIFY_CERTIFICATE } if use_requests_handler: handle_proxy() connection_args['handler'] = requests_handler elif proxy: connection_args['handler'] = handler(proxy) try: service = client.connect(**connection_args) except urllib2.URLError as e: if e.reason.errno == 1 and sys.version_info < (2, 6, 3): # type: ignore pass else: raise if service is None: demisto.error(""Could not connect to SplunkPy"") # The command demisto.command() holds the command sent from the user. if demisto.command() == 'test-module': test_module(service) demisto.results('ok') if demisto.command() == 'splunk-search': splunk_search_command(service) if demisto.command() == 'splunk-job-create': splunk_job_create_command(service) if demisto.command() == 'splunk-results': splunk_results_command(service) if demisto.command() == 'fetch-incidents': fetch_incidents(service) if demisto.command() == 'splunk-get-indexes': splunk_get_indexes_command(service) if demisto.command() == 'splunk-submit-event': splunk_submit_event_command(service) if demisto.command() == 'splunk-notable-event-edit': splunk_edit_notable_event_command(proxy) if demisto.command() == 'splunk-submit-event-hec': splunk_submit_event_hec_command() if demisto.command() == 'splunk-job-status': splunk_job_status(service) if demisto.command().startswith('splunk-kv-') and service is not None: args = demisto.args() app = args.get('app_name', 'search') service.namespace = namespace(app=app, owner='nobody', sharing='app') check_error(service, args) if demisto.command() == 'splunk-kv-store-collection-create': kv_store_collection_create(service) elif demisto.command() == 'splunk-kv-store-collection-config': kv_store_collection_config(service) elif demisto.command() == 'splunk-kv-store-collection-delete': kv_store_collection_delete(service) elif demisto.command() == 'splunk-kv-store-collections-list': kv_store_collections_list(service) elif demisto.command() == 'splunk-kv-store-collection-add-entries': kv_store_collection_add_entries(service) elif demisto.command() in ['splunk-kv-store-collection-data-list', 'splunk-kv-store-collection-search-entry']: kv_store_collection_data(service) elif demisto.command() == 'splunk-kv-store-collection-data-delete': kv_store_collection_data_delete(service) elif demisto.command() == 'splunk-kv-store-collection-delete-entry': kv_store_collection_delete_entry(service) if demisto.command() == 'get-mapping-fields': if demisto.params().get('use_cim', False): demisto.results(get_cim_mapping_field_command()) else: get_mapping_fields_command(service) ","def main(): if demisto.command() == 'splunk-parse-raw': splunk_parse_raw_command() sys.exit(0) service = None proxy = 
demisto.params().get('proxy') use_requests_handler = demisto.params().get('use_requests_handler') connection_args = { 'host': demisto.params()['host'], 'port': demisto.params()['port'], 'app': demisto.params().get('app', '-'), 'username': demisto.params()['authentication']['identifier'], 'password': demisto.params()['authentication']['password'], 'verify': VERIFY_CERTIFICATE } if use_requests_handler: handle_proxy() connection_args['handler'] = requests_handler elif proxy: connection_args['handler'] = handler(proxy) try: service = client.connect(**connection_args) except urllib2.URLError as e: if e.reason.errno == 1 and sys.version_info < (2, 6, 3): # type: ignore pass else: raise if service is None: demisto.error(""Could not connect to SplunkPy"") # The command demisto.command() holds the command sent from the user. if demisto.command() == 'test-module': test_module(service) demisto.results('ok') if demisto.command() == 'splunk-search': splunk_search_command(service) if demisto.command() == 'splunk-job-create': splunk_job_create_command(service) if demisto.command() == 'splunk-results': splunk_results_command(service) if demisto.command() == 'fetch-incidents': fetch_incidents(service) if demisto.command() == 'splunk-get-indexes': splunk_get_indexes_command(service) if demisto.command() == 'splunk-submit-event': splunk_submit_event_command(service) if demisto.command() == 'splunk-notable-event-edit': splunk_edit_notable_event_command(proxy) if demisto.command() == 'splunk-submit-event-hec': splunk_submit_event_hec_command() if demisto.command() == 'splunk-job-status': splunk_job_status(service) if demisto.command().startswith('splunk-kv-') and service is not None: args = demisto.args() app = args.get('app_name', 'search') service.namespace = namespace(app=app, owner='nobody', sharing='app') check_error(service, args) if demisto.command() == 'splunk-kv-store-collection-create': kv_store_collection_create(service) elif demisto.command() == 'splunk-kv-store-collection-config': kv_store_collection_config(service) elif demisto.command() == 'splunk-kv-store-collection-delete': kv_store_collection_delete(service) elif demisto.command() == 'splunk-kv-store-collections-list': kv_store_collections_list(service) elif demisto.command() == 'splunk-kv-store-collection-add-entries': kv_store_collection_add_entries(service) elif demisto.command() in ['splunk-kv-store-collection-data-list', 'splunk-kv-store-collection-search-entry']: kv_store_collection_data(service) elif demisto.command() == 'splunk-kv-store-collection-data-delete': kv_store_collection_data_delete(service) elif demisto.command() == 'splunk-kv-store-collection-delete-entry': kv_store_collection_delete_entry(service) if demisto.command() == 'get-mapping-fields': if argToBoolean(demisto.params().get('use_cim', False)): demisto.results(get_cim_mapping_field_command()) else: get_mapping_fields_command(service) " 32347,"def fetch_indicators(): """"""Retrieve vulnerability data from Exodus Intelligence. 
Returns: Bool: True/False based on success or failure """""" score = 0.0 indicators = [] formatted_list = [] min_xi = 0 if MIN_XI == """" else MIN_XI max_xi = 10 if MAX_XI == """" else MAX_XI try: exodus = connect() demisto.debug(""Connected to server"") recent_vulns = exodus.get_recent_vulns() try: data = recent_vulns[""data""][""items""] except KeyError as e: demisto.debug(f""There was an error getting the data {e}"") demisto.debug(f""Fetched {len(data)} total vulnerabilities"") for item in data: try: cve = item[""cves""][0] report_data = {""cve"": cve, ""identifier"": item[""identifier""]} if score >= min_xi and score <= max_xi: report = exodus.get_report(cve) vulnerability = exodus.get_vuln(cve) if report[""ok""] is True: report_data = extract_data(report, report_data) vuln_data = extract_data(vulnerability, report_data) formatted_list.append(vuln_data) except KeyError as e: demisto.debug(f""There was a problem: {e}"") except Exception as e: demisto.debug(f""Something went wrong: {e}"") return False if len(formatted_list): for items in formatted_list: try: indicator = { ""value"": items[""identifier""], ""type"": ""Exodus Intelligence"", ""fields"": items, } indicators.append(indicator) except KeyError as e: demisto.debug(f""There was a problem creating indicators: {e}"") demisto.createIndicators(indicators) return True ","def fetch_indicators(): """"""Retrieve vulnerability data from Exodus Intelligence. Returns: Bool: True/False based on success or failure """""" score = 0.0 indicators = [] formatted_list = [] min_xi = 0 if MIN_XI == """" else MIN_XI max_xi = 10 if MAX_XI == """" else MAX_XI try: exodus = connect() demisto.debug(""Connected to server"") recent_vulns = exodus.get_recent_vulns() try: data = recent_vulns[""data""][""items""] except KeyError as e: demisto.debug(f""There was an error getting the data {e}"") demisto.debug(f""Fetched {len(data)} total vulnerabilities"") for item in data: try: cve = item[""cves""][0] report_data = {""cve"": cve, ""identifier"": item[""identifier""]} if score >= min_xi and score <= max_xi: report = exodus.get_report(cve) vulnerability = exodus.get_vuln(cve) if report.get(""ok""): report_data = extract_data(report, report_data) vuln_data = extract_data(vulnerability, report_data) formatted_list.append(vuln_data) except KeyError as e: demisto.debug(f""There was a problem: {e}"") except Exception as e: demisto.debug(f""Something went wrong: {e}"") return False if len(formatted_list): for items in formatted_list: try: indicator = { ""value"": items[""identifier""], ""type"": ""Exodus Intelligence"", ""fields"": items, } indicators.append(indicator) except KeyError as e: demisto.debug(f""There was a problem creating indicators: {e}"") demisto.createIndicators(indicators) return True " 17951,"def apply_thresholds(input, thresholds, choices): """"""Makes a choice based on an input and thresholds. From list of ``choices``, it selects one of them based on a list of inputs, depending on the position of each ``input`` whithin a list of ``thresholds``. It does so for each ``input`` provided. Args: input: A list of inputs to make a choice. thresholds: A list of thresholds to choose. choices: A list of the possible choices. Returns: :obj:`numpy.ndarray` of :obj:`float`: A list of the choices made. Raises: :exc:`AssertionError`: When the number of ``thresholds`` (t) and the number of choices (c) are not either t == c or t == c - 1. 
Examples: >>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20]) array([10]) >>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20]) array([10]) >>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20]) array([15]) >>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20]) array([20]) >>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20]) array([0]) """""" condlist = [input <= threshold for threshold in thresholds] if len(condlist) == len(choices) - 1: # If a choice is provided for input > highest threshold, last condition must be true to return it. condlist += [True] assert len(condlist) == len(choices), \ ""apply_thresholds must be called with the same number of thresholds than choices, or one more choice"" return numpy.select(condlist, choices) ","def apply_thresholds(input, thresholds, choices): """"""Makes a choice based on an input and thresholds. From list of ``choices``, it selects one of them based on a list of inputs, depending on the position of each ``input`` whithin a list of ``thresholds``. It does so for each ``input`` provided. Args: input: A list of inputs to make a choice. thresholds: A list of thresholds to choose. choices: A list of the possible values to choose from. Returns: :obj:`numpy.ndarray` of :obj:`float`: A list of the choices made. Raises: :exc:`AssertionError`: When the number of ``thresholds`` (t) and the number of choices (c) are not either t == c or t == c - 1. Examples: >>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20]) array([10]) >>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20]) array([10]) >>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20]) array([15]) >>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20]) array([20]) >>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20]) array([0]) """""" condlist = [input <= threshold for threshold in thresholds] if len(condlist) == len(choices) - 1: # If a choice is provided for input > highest threshold, last condition must be true to return it. 
condlist += [True] assert len(condlist) == len(choices), \ ""apply_thresholds must be called with the same number of thresholds than choices, or one more choice"" return numpy.select(condlist, choices) " 23123,"def test_to_dataframe_optimize_graph(): x = db.from_sequence( [{""name"": ""test1"", ""v1"": 1}, {""name"": ""test2"", ""v1"": 2}], npartitions=2 ) # linear operations will be fused by graph optimization y = x.map(lambda a: dict(**a, v2=a[""v1""] + 1)) y = y.map(lambda a: dict(**a, v3=a[""v2""] + 1)) # with optimizations d = y.to_dataframe()[""v3""] assert len([k for k in d.dask if k[0].startswith(""getitem"")]) == 2 # no optimizations d2 = y.to_dataframe(optimize_graph=False)[""v3""] # due to fusing the unoptimized graph will be larger assert len(dict(d2.dask)) > len(dict(d.dask)) assert (d.compute() == d2.compute()).all() ","def test_to_dataframe_optimize_graph(): x = db.from_sequence( [{""name"": ""test1"", ""v1"": 1}, {""name"": ""test2"", ""v1"": 2}], npartitions=2 ) # linear operations will be fused by graph optimization y = x.map(lambda a: dict(**a, v2=a[""v1""] + 1)) y = y.map(lambda a: dict(**a, v3=a[""v2""] + 1)) # with optimizations d = y.to_dataframe() assert len([k for k in d.dask if k[0].startswith(""getitem"")]) == 2 # no optimizations d2 = y.to_dataframe(optimize_graph=False)[""v3""] # due to fusing the unoptimized graph will be larger assert len(dict(d2.dask)) > len(dict(d.dask)) assert (d.compute() == d2.compute()).all() " 48793,"def run_this_func(): """""" Print the payload ""message"" passed to the DagRun conf attribute. :param context: The execution context :type context: dict """""" context = get_current_context() print(f""Remotely received value of {context['dag_run'].conf['message']} for key=message"") ","def run_this_func(dag_run): """""" Print the payload ""message"" passed to the DagRun conf attribute. """""" print(f""Remotely received value of {dag_run.conf['message']} for key=message"") " 31024,"def panorama_route_lookup(dest_ip: str, virtual_router=None): """""" Given the provided ip address, looks up the outgoing interface and zone on the firewall. """""" if not VSYS: raise Exception(""The 'panorama-route-lookup' command is only relevant for a Firewall instance."") response = panorama_get_routes(virtual_router) if 'entry' not in response['response']['result']: raise Exception(""No routes returned from the Firewall."") else: routes = response['response']['result']['entry'] ip_addr = ipaddress.ip_address(dest_ip) current_match = None matched_route = None for route in routes: subnet_raw = route['destination'] subnet = ipaddress.ip_network(subnet_raw) # If the given IP address is in the subnet if ip_addr in subnet: # IF we haven't matched yet if not current_match: current_match = subnet matched_route = route # If this is a greater subnet elif subnet.prefixlen > current_match.prefixlen: current_match = subnet matched_route = route if matched_route: return matched_route else: raise Exception(""Route not found."") ","def panorama_route_lookup(dest_ip: str, virtual_router=None): """""" Given the provided ip address, looks up the outgoing interface and zone on the firewall. 
"""""" if not VSYS: raise Exception(""The 'panorama-route-lookup' command is only relevant for a Firewall instance."") response = panorama_get_routes(virtual_router) if 'entry' not in response['response']['result']: raise Exception(""No routes returned from the Firewall."") else: routes = response['response']['result']['entry'] ip_addr = ipaddress.ip_address(dest_ip) current_match = None matched_route = None for route in routes: subnet_raw = route['destination'] subnet = ipaddress.ip_network(subnet_raw) # If the given IP address is in the subnet if ip_addr in subnet: # IF we haven't matched yet if not current_match: current_match = subnet matched_route = route # If this is a greater subnet elif subnet.prefixlen > current_match.prefixlen: current_match = subnet matched_route = route if matched_route: return matched_route else: raise DemistoException(""Route not found."") " 40256,"def parse_send_xml_tree(gmp, xml_tree): for schedule in xml_tree.xpath('schedule'): name = schedule.find('name').text comment = schedule.find('comment').text if comment is None: comment = '' ical = schedule.find('icalendar').text timezone = schedule.find('timezone_abbrev').text if timezone is None: timezone = schedule.find('timezone').text gmp.create_schedule( name=name, comment=comment, timezone=timezone, icalendar=ical ) ","def parse_send_xml_tree(gmp, xml_tree): for schedule in xml_tree.xpath('schedule'): name = schedule.find('name').text comment = schedule.find('comment').text if comment is None: comment = '' ical = schedule.find('icalendar').text timezone = schedule.find('timezone').text gmp.create_schedule( name=name, comment=comment, timezone=timezone, icalendar=ical ) " 11340,"def create_messages_from_dicts_if_needed(messages, message_type): # type: (MessagesType, Type[ServiceBusMessage]) -> Union[ServiceBusMessage, List[ServiceBusMessage]] """""" This method is used to convert dict representations of messages to a list of ServiceBusMessage objects or ServiceBusBatchMessage. :param Messages messages: A list or single instance of messages of type ServiceBusMessages or dict representations of type ServiceBusMessage. Also accepts ServiceBusBatchMessage. :param Type[ServiceBusMessage] message_type: The class type to return the messages as. :rtype: Union[ServiceBusMessage, List[ServiceBusMessage]] """""" if isinstance(messages, list): return [_single_message_from_dict(m, message_type) for m in messages] return _single_message_from_dict(messages, message_type) ","def create_messages_from_dicts_if_needed(messages, message_type): # type: (MessagesType, Type[ServiceBusMessage]) -> Union[ServiceBusMessage, List[ServiceBusMessage]] """""" This method is used to convert dict representations of messages to a list of ServiceBusMessage objects. :param Messages messages: A list or single instance of messages of type ServiceBusMessages or dict representations of type ServiceBusMessage. Also accepts ServiceBusBatchMessage. :param Type[ServiceBusMessage] message_type: The class type to return the messages as. 
:rtype: Union[ServiceBusMessage, List[ServiceBusMessage]] """""" if isinstance(messages, list): return [_single_message_from_dict(m, message_type) for m in messages] return _single_message_from_dict(messages, message_type) " 39321,"def load_sphere_vectors(): """"""Create example sphere with a swirly vector field defined on nodes."""""" sphere = pyvista.Sphere(radius=3.14) # make cool swirly pattern vectors = np.vstack( ( np.sin(sphere.points[:, 0]), np.cos(sphere.points[:, 1]), np.cos(sphere.points[:, 2]), ) ).T # add and scale sphere[""vectors""] = vectors * 0.3 sphere.set_active_vectors = ""vectors"" return sphere ","def load_sphere_vectors(): """"""Create example sphere with a swirly vector field defined on nodes."""""" sphere = pyvista.Sphere(radius=3.14) # make cool swirly pattern vectors = np.vstack( ( np.sin(sphere.points[:, 0]), np.cos(sphere.points[:, 1]), np.cos(sphere.points[:, 2]), ) ).T # add and scale sphere[""vectors""] = vectors * 0.3 sphere.set_active_vectors(""vectors"") return sphere " 33515,"def send_event_to_api_destination(target_arn, event, http_parameters: Dict = None): """"""Send an event to an EventBridge API destination See https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-api-destinations.html"""""" # ARN format: ...:api-destination/{name}/{uuid} region = target_arn.split("":"")[3] api_destination_name = target_arn.split("":"")[-1].split(""/"")[1] events_client = connect_to_service(""events"", region_name=region) destination = events_client.describe_api_destination(Name=api_destination_name) # get destination endpoint details method = destination.get(""HttpMethod"", ""GET"") endpoint = destination.get(""InvocationEndpoint"") state = destination.get(""ApiDestinationState"") or ""ACTIVE"" LOG.debug('Calling EventBridge API destination (state ""%s""): %s %s', state, method, endpoint) headers = { # default headers AWS sends with every api destination call ""User-Agent"": ""Amazon/EventBridge/ApiDestinations"", ""Content-Type"": ""application/json; charset=utf-8"", ""Range"": ""bytes=0-1048575"", ""Accept-Encoding"": ""gzip,deflate"", ""Connection"": ""close"", } endpoint = add_api_destination_authorization(destination, headers, event) if http_parameters: endpoint = add_http_parameters(http_parameters, endpoint, headers, event) result = requests.request( method=method, url=endpoint, data=json.dumps(event or {}), headers=headers ) if result.status_code >= 400: LOG.debug(""Received code %s forwarding events: %s %s"", result.status_code, method, endpoint) if result.status_code == 429 or 500 <= result.status_code <= 600: pass # TODO: retry logic (only retry on 429 and 5xx response status) ","def send_event_to_api_destination(target_arn, event, http_parameters: Optional[Dict] = None): """"""Send an event to an EventBridge API destination See https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-api-destinations.html"""""" # ARN format: ...:api-destination/{name}/{uuid} region = target_arn.split("":"")[3] api_destination_name = target_arn.split("":"")[-1].split(""/"")[1] events_client = connect_to_service(""events"", region_name=region) destination = events_client.describe_api_destination(Name=api_destination_name) # get destination endpoint details method = destination.get(""HttpMethod"", ""GET"") endpoint = destination.get(""InvocationEndpoint"") state = destination.get(""ApiDestinationState"") or ""ACTIVE"" LOG.debug('Calling EventBridge API destination (state ""%s""): %s %s', state, method, endpoint) headers = { # default headers AWS sends with every 
api destination call ""User-Agent"": ""Amazon/EventBridge/ApiDestinations"", ""Content-Type"": ""application/json; charset=utf-8"", ""Range"": ""bytes=0-1048575"", ""Accept-Encoding"": ""gzip,deflate"", ""Connection"": ""close"", } endpoint = add_api_destination_authorization(destination, headers, event) if http_parameters: endpoint = add_http_parameters(http_parameters, endpoint, headers, event) result = requests.request( method=method, url=endpoint, data=json.dumps(event or {}), headers=headers ) if result.status_code >= 400: LOG.debug(""Received code %s forwarding events: %s %s"", result.status_code, method, endpoint) if result.status_code == 429 or 500 <= result.status_code <= 600: pass # TODO: retry logic (only retry on 429 and 5xx response status) " 2368,"def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False): """"""Ward clustering based on a Feature matrix. Recursively merges the pair of clusters that minimally increases within-cluster variance. The inertia matrix uses a Heapq-based representation. This is the structured version, that takes into account some topological structure between samples. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) feature matrix representing `n_samples` samples to be clustered. connectivity : sparse matrix, default=None connectivity matrix. Defines for each sample the neighboring samples following a given structure of the data. The matrix is assumed to be symmetric and only the upper triangular half is used. Default is None, i.e, the Ward algorithm is unstructured. n_clusters : int, default=None `n_clusters` should be less than `n_samples`. Stop early the construction of the tree at `n_clusters.` This is useful to decrease computation time if the number of clusters is not small compared to the number of samples. In this case, the complete tree is not computed, thus the 'children' output is of limited use, and the 'parents' output should rather be used. This option is valid only when specifying a connectivity matrix. return_distance : bool, default=False If `True`, return the distance between the clusters. Returns ------- children : ndarray of shape (n_nodes-1, 2) The children of each non-leaf node. Values less than `n_samples` correspond to leaves of the tree which are the original samples. A node `i` greater than or equal to `n_samples` is a non-leaf node and has children `children_[i - n_samples]`. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_samples + i`. n_connected_components : int The number of connected components in the graph. n_leaves : int The number of leaves in the tree. parents : ndarray of shape (n_nodes,) or None The parent of each node. Only returned when a connectivity matrix is specified, elsewhere 'None' is returned. distances : ndarray of shape (n_nodes-1,) Only returned if `return_distance` is set to `True` (for compatibility). The distances between the centers of the nodes. `distances[i]` corresponds to a weighted Euclidean distance between the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to leaves of the tree, then `distances[i]` is their unweighted Euclidean distance. Distances are updated in the following way (from scipy.hierarchy.linkage): The new entry :math:`d(u,v)` is computed as follows, .. 
math:: d(u,v) = \\sqrt{\\frac{|v|+|s|} {T}d(v,s)^2 + \\frac{|v|+|t|} {T}d(v,t)^2 - \\frac{|v|} {T}d(s,t)^2} where :math:`u` is the newly joined cluster consisting of clusters :math:`s` and :math:`t`, :math:`v` is an unused cluster in the forest, :math:`T=|v|+|s|+|t|`, and :math:`|*|` is the cardinality of its argument. This is also known as the incremental algorithm. """""" X = np.asarray(X) if X.ndim == 1: X = np.reshape(X, (-1, 1)) n_samples, n_features = X.shape if connectivity is None: from scipy.cluster import hierarchy # imports PIL if n_clusters is not None: warnings.warn( ""Partial build of the tree is implemented "" ""only for structured clustering (i.e. with "" ""explicit connectivity). The algorithm "" ""will build the full tree and only "" ""retain the lower branches required "" ""for the specified number of clusters"", stacklevel=2, ) X = np.require(X, requirements=""W"") out = hierarchy.ward(X) children_ = out[:, :2].astype(np.intp) if return_distance: distances = out[:, 2] return children_, 1, n_samples, None, distances else: return children_, 1, n_samples, None connectivity, n_connected_components = _fix_connectivity( X, connectivity, affinity=""euclidean"" ) if n_clusters is None: n_nodes = 2 * n_samples - 1 else: if n_clusters > n_samples: raise ValueError( ""Cannot provide more clusters than samples. "" ""%i n_clusters was asked, and there are %i "" ""samples."" % (n_clusters, n_samples) ) n_nodes = 2 * n_samples - n_clusters # create inertia matrix coord_row = [] coord_col = [] A = [] for ind, row in enumerate(connectivity.rows): A.append(row) # We keep only the upper triangular for the moments # Generator expressions are faster than arrays on the following row = [i for i in row if i < ind] coord_row.extend( len(row) * [ ind, ] ) coord_col.extend(row) coord_row = np.array(coord_row, dtype=np.intp, order=""C"") coord_col = np.array(coord_col, dtype=np.intp, order=""C"") # build moments as a list moments_1 = np.zeros(n_nodes, order=""C"") moments_1[:n_samples] = 1 moments_2 = np.zeros((n_nodes, n_features), order=""C"") moments_2[:n_samples] = X inertia = np.empty(len(coord_row), dtype=np.float64, order=""C"") _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia) inertia = list(zip(inertia, coord_row, coord_col)) heapify(inertia) # prepare the main fields parent = np.arange(n_nodes, dtype=np.intp) used_node = np.ones(n_nodes, dtype=bool) children = [] if return_distance: distances = np.empty(n_nodes - n_samples) not_visited = np.empty(n_nodes, dtype=np.int8, order=""C"") # recursive merge loop for k in range(n_samples, n_nodes): # identify the merge while True: inert, i, j = heappop(inertia) if used_node[i] and used_node[j]: break parent[i], parent[j] = k, k children.append((i, j)) used_node[i] = used_node[j] = False if return_distance: # store inertia value distances[k - n_samples] = inert # update the moments moments_1[k] = moments_1[i] + moments_1[j] moments_2[k] = moments_2[i] + moments_2[j] # update the structure matrix A and the inertia matrix coord_col = [] not_visited.fill(1) not_visited[k] = 0 _hierarchical._get_parents(A[i], coord_col, parent, not_visited) _hierarchical._get_parents(A[j], coord_col, parent, not_visited) # List comprehension is faster than a for loop [A[col].append(k) for col in coord_col] A.append(coord_col) coord_col = np.array(coord_col, dtype=np.intp, order=""C"") coord_row = np.empty(coord_col.shape, dtype=np.intp, order=""C"") coord_row.fill(k) n_additions = len(coord_row) ini = np.empty(n_additions, 
dtype=np.float64, order=""C"") _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini) # List comprehension is faster than a for loop [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)] # Separate leaves in children (empty lists up to now) n_leaves = n_samples # sort children to get consistent output with unstructured version children = [c[::-1] for c in children] children = np.array(children) # return numpy array for efficient caching if return_distance: # 2 is scaling factor to compare w/ unstructured version distances = np.sqrt(2.0 * distances) return children, n_connected_components, n_leaves, parent, distances else: return children, n_connected_components, n_leaves, parent ","def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False): """"""Ward clustering based on a Feature matrix. Recursively merges the pair of clusters that minimally increases within-cluster variance. The inertia matrix uses a Heapq-based representation. This is the structured version, that takes into account some topological structure between samples. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) Feature matrix representing `n_samples` samples to be clustered. connectivity : sparse matrix, default=None connectivity matrix. Defines for each sample the neighboring samples following a given structure of the data. The matrix is assumed to be symmetric and only the upper triangular half is used. Default is None, i.e, the Ward algorithm is unstructured. n_clusters : int, default=None `n_clusters` should be less than `n_samples`. Stop early the construction of the tree at `n_clusters.` This is useful to decrease computation time if the number of clusters is not small compared to the number of samples. In this case, the complete tree is not computed, thus the 'children' output is of limited use, and the 'parents' output should rather be used. This option is valid only when specifying a connectivity matrix. return_distance : bool, default=False If `True`, return the distance between the clusters. Returns ------- children : ndarray of shape (n_nodes-1, 2) The children of each non-leaf node. Values less than `n_samples` correspond to leaves of the tree which are the original samples. A node `i` greater than or equal to `n_samples` is a non-leaf node and has children `children_[i - n_samples]`. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_samples + i`. n_connected_components : int The number of connected components in the graph. n_leaves : int The number of leaves in the tree. parents : ndarray of shape (n_nodes,) or None The parent of each node. Only returned when a connectivity matrix is specified, elsewhere 'None' is returned. distances : ndarray of shape (n_nodes-1,) Only returned if `return_distance` is set to `True` (for compatibility). The distances between the centers of the nodes. `distances[i]` corresponds to a weighted Euclidean distance between the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to leaves of the tree, then `distances[i]` is their unweighted Euclidean distance. Distances are updated in the following way (from scipy.hierarchy.linkage): The new entry :math:`d(u,v)` is computed as follows, .. 
math:: d(u,v) = \\sqrt{\\frac{|v|+|s|} {T}d(v,s)^2 + \\frac{|v|+|t|} {T}d(v,t)^2 - \\frac{|v|} {T}d(s,t)^2} where :math:`u` is the newly joined cluster consisting of clusters :math:`s` and :math:`t`, :math:`v` is an unused cluster in the forest, :math:`T=|v|+|s|+|t|`, and :math:`|*|` is the cardinality of its argument. This is also known as the incremental algorithm. """""" X = np.asarray(X) if X.ndim == 1: X = np.reshape(X, (-1, 1)) n_samples, n_features = X.shape if connectivity is None: from scipy.cluster import hierarchy # imports PIL if n_clusters is not None: warnings.warn( ""Partial build of the tree is implemented "" ""only for structured clustering (i.e. with "" ""explicit connectivity). The algorithm "" ""will build the full tree and only "" ""retain the lower branches required "" ""for the specified number of clusters"", stacklevel=2, ) X = np.require(X, requirements=""W"") out = hierarchy.ward(X) children_ = out[:, :2].astype(np.intp) if return_distance: distances = out[:, 2] return children_, 1, n_samples, None, distances else: return children_, 1, n_samples, None connectivity, n_connected_components = _fix_connectivity( X, connectivity, affinity=""euclidean"" ) if n_clusters is None: n_nodes = 2 * n_samples - 1 else: if n_clusters > n_samples: raise ValueError( ""Cannot provide more clusters than samples. "" ""%i n_clusters was asked, and there are %i "" ""samples."" % (n_clusters, n_samples) ) n_nodes = 2 * n_samples - n_clusters # create inertia matrix coord_row = [] coord_col = [] A = [] for ind, row in enumerate(connectivity.rows): A.append(row) # We keep only the upper triangular for the moments # Generator expressions are faster than arrays on the following row = [i for i in row if i < ind] coord_row.extend( len(row) * [ ind, ] ) coord_col.extend(row) coord_row = np.array(coord_row, dtype=np.intp, order=""C"") coord_col = np.array(coord_col, dtype=np.intp, order=""C"") # build moments as a list moments_1 = np.zeros(n_nodes, order=""C"") moments_1[:n_samples] = 1 moments_2 = np.zeros((n_nodes, n_features), order=""C"") moments_2[:n_samples] = X inertia = np.empty(len(coord_row), dtype=np.float64, order=""C"") _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia) inertia = list(zip(inertia, coord_row, coord_col)) heapify(inertia) # prepare the main fields parent = np.arange(n_nodes, dtype=np.intp) used_node = np.ones(n_nodes, dtype=bool) children = [] if return_distance: distances = np.empty(n_nodes - n_samples) not_visited = np.empty(n_nodes, dtype=np.int8, order=""C"") # recursive merge loop for k in range(n_samples, n_nodes): # identify the merge while True: inert, i, j = heappop(inertia) if used_node[i] and used_node[j]: break parent[i], parent[j] = k, k children.append((i, j)) used_node[i] = used_node[j] = False if return_distance: # store inertia value distances[k - n_samples] = inert # update the moments moments_1[k] = moments_1[i] + moments_1[j] moments_2[k] = moments_2[i] + moments_2[j] # update the structure matrix A and the inertia matrix coord_col = [] not_visited.fill(1) not_visited[k] = 0 _hierarchical._get_parents(A[i], coord_col, parent, not_visited) _hierarchical._get_parents(A[j], coord_col, parent, not_visited) # List comprehension is faster than a for loop [A[col].append(k) for col in coord_col] A.append(coord_col) coord_col = np.array(coord_col, dtype=np.intp, order=""C"") coord_row = np.empty(coord_col.shape, dtype=np.intp, order=""C"") coord_row.fill(k) n_additions = len(coord_row) ini = np.empty(n_additions, 
dtype=np.float64, order=""C"") _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini) # List comprehension is faster than a for loop [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)] # Separate leaves in children (empty lists up to now) n_leaves = n_samples # sort children to get consistent output with unstructured version children = [c[::-1] for c in children] children = np.array(children) # return numpy array for efficient caching if return_distance: # 2 is scaling factor to compare w/ unstructured version distances = np.sqrt(2.0 * distances) return children, n_connected_components, n_leaves, parent, distances else: return children, n_connected_components, n_leaves, parent " 7917,"def test_cell_translation(pincell_model_w_univ, mpi_intracomm): openmc.lib.finalize() openmc.lib.init(intracomm=mpi_intracomm) openmc.lib.simulation_init() # Cell 1 is filled with a material so it has a translation, but we can't # set it. cell = openmc.lib.cells[1] assert cell.get_translation() == pytest.approx([0., 0., 0.]) with pytest.raises(exc.GeometryError, match='not filled with'): cell.set_translation(np.array([1., 0., -1.])) # Cell 2 was given a universe, so we can assign it a translation vector cell = openmc.lib.cells[2] assert cell.get_translation() == pytest.approx([0., 0., 0.]) # This time we *can* set it cell.set_translation(np.array([1., 0., -1.])) assert cell.get_translation() == pytest.approx([1., 0., -1.]) ","def test_cell_translation(pincell_model_w_univ, mpi_intracomm): openmc.lib.finalize() openmc.lib.init(intracomm=mpi_intracomm) openmc.lib.simulation_init() # Cell 1 is filled with a material so it has a translation, but we can't # set it. cell = openmc.lib.cells[1] assert cell.get_translation() == pytest.approx([0., 0., 0.]) with pytest.raises(exc.GeometryError, match='not filled with'): cell.set_translation((1., 0., -1.)) # Cell 2 was given a universe, so we can assign it a translation vector cell = openmc.lib.cells[2] assert cell.get_translation() == pytest.approx([0., 0., 0.]) # This time we *can* set it cell.set_translation(np.array([1., 0., -1.])) assert cell.get_translation() == pytest.approx([1., 0., -1.]) " 56480,"def test_nested_measurement_throws_error(experiment, DAC, DMM): meas1 = Measurement() meas2 = Measurement() # pytest.raises(Exception): does not work because it does not allow # the state of _is_entered to be changed to False when context manager # ends. Hence all the test after this one fails. try: with meas1.run(): with meas2.run(): pass pass except RuntimeError: return True assert meas1.run()._is_entered == False assert meas2.run()._is_entered == False ","def test_nested_measurement_throws_error(experiment, DAC, DMM): meas1 = Measurement() meas2 = Measurement() # pytest.raises(Exception): does not work because it does not allow # the state of _is_entered to be changed to False when context manager # ends. Hence all the test after this one fails. exception_raised = False try: with meas1.run(): with meas2.run(): pass pass except RuntimeError as e: if str(e) == 'Nested measurements are not supported': exception_raised = True else: raise e assert exception_raised, ""Expected exception about nesting measurements wasn't raised"" assert meas1.run()._is_entered == False assert meas2.run()._is_entered == False " 1570,"def check_classification_targets(y): """"""Ensure that target y is of a non-regression type. 
Only the following target types (as defined in type_of_target) are allowed: 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences' Parameters ---------- y : array-like """""" y_type = type_of_target(y) if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences']: if issparse(y): raise TypeError( ""multilabel-indicator of type Sparse are not supported."" ) else: raise ValueError(""Unknown label type: %r"" % y_type) ","def check_classification_targets(y): """"""Ensure that target y is of a non-regression type. Only the following target types (as defined in type_of_target) are allowed: 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences' Parameters ---------- y : array-like """""" y_type = type_of_target(y) if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences']: if issparse(y): raise TypeError( ""sparse multilabel-indicator for y is not supported."" ) else: raise ValueError(""Unknown label type: %r"" % y_type) " 2004,"def _weighted_percentile(array, sample_weight, percentile=50, interpolation=""nearest""): """"""Compute weighted percentile Computes lower weighted percentile. If `array` is a 2D array, the `percentile` is computed along the axis 0. .. versionchanged:: 0.24 Accepts 2D `array`. Parameters ---------- array : ndarray of shape (n,) or (n, m) Values to take the weighted percentile of. sample_weight: ndarray of (n,) or (n, m) Weights for each value in `array`. Must be same shape as `array` or of shape `(array.shape[0],)`. percentile: inr or float, default=50 Percentile to compute. Must be value between 0 and 100. interpolation : {""linear"", ""lower"", ""higher"", ""nearest""}, default=""lower"" The interpolation method to use when the percentile lies between data points `i` and `j`: * `""linear""`: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`; * `""lower""`: i`; * `""higher""`: `j`; * `""nearest""`: `i` or `j`, whichever is nearest (default). .. versionadded: 0.24 Returns ------- percentile_value : float or int if `array` of shape (n,), otherwise\ ndarray of shape (m,) Weighted percentile. """""" possible_interpolation = (""linear"", ""lower"", ""higher"", ""nearest"") if interpolation not in possible_interpolation: raise ValueError( f""'interpolation' should be one of "" f""{', '.join(possible_interpolation)}. Got '{interpolation}' "" f""instead."" ) if np.any(np.count_nonzero(sample_weight, axis=0) < 1): raise ValueError( ""All weights cannot be null when computing a weighted percentile."" ) n_dim = array.ndim if n_dim == 0: return array[()] if array.ndim == 1: array = array.reshape((-1, 1)) if (array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]): # when `sample_weight` is 1D, we repeat it for each column of `array` sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T n_rows, n_cols = array.shape sorted_idx = np.argsort(array, axis=0) sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0) percentile = np.array([percentile / 100] * n_cols) cum_weigths = stable_cumsum(sorted_weights, axis=0) def _squeeze_arr(arr, n_dim): return arr[0] if n_dim == 1 else arr # Percentile can be computed with 3 different alternative: # https://en.wikipedia.org/wiki/Percentile # These 3 alternatives depend of the value of a parameter C. 
NumPy uses # the variant where C=0 which allows to obtained a strictly monotically # increasing function which is defined as: # P = (x - 1) / (N - 1); x in [1, N] # Weighted percentile change this formula by taking into account the # weights instead of the data frequency. # P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being # the sum of the weights. adjusted_percentile = (cum_weigths - sorted_weights) with np.errstate(invalid=""ignore""): adjusted_percentile /= cum_weigths[-1] - sorted_weights nan_mask = np.isnan(adjusted_percentile) adjusted_percentile[nan_mask] = 1 if interpolation in (""lower"", ""higher"", ""nearest""): percentile_idx = np.array([ np.searchsorted(adjusted_percentile[:, col], percentile[col], side=""left"") for col in range(n_cols) ]) if interpolation == ""lower"" and np.all(percentile < 1): # P = 100 is a corner case for ""lower"" percentile_idx -= 1 elif interpolation == ""nearest"" and np.all(percentile < 1): for col in range(n_cols): error_higher = abs( adjusted_percentile[percentile_idx[col], col] - percentile[col] ) error_lower = abs( adjusted_percentile[percentile_idx[col] - 1, col] - percentile[col] ) if error_higher >= error_lower: percentile_idx[col] -= 1 percentile_idx = np.apply_along_axis( lambda x: np.clip(x, 0, n_rows - 1), axis=0, arr=percentile_idx ) percentile_value = array[ sorted_idx[percentile_idx, np.arange(n_cols)], np.arange(n_cols) ] percentile_value = _squeeze_arr(percentile_value, n_dim) else: # interpolation == ""linear"" percentile_value = np.array([ np.interp( x=percentile[col], xp=adjusted_percentile[:, col], fp=array[sorted_idx[:, col], col], ) for col in range(n_cols) ]) percentile_value = _squeeze_arr(percentile_value, n_dim) single_sample_weight = np.count_nonzero(sample_weight, axis=0) if np.any(single_sample_weight == 1): # edge case where a single weight is non-null in which case the # previous methods will fail if not isinstance(percentile_value, Iterable): percentile_value = _squeeze_arr( array[np.nonzero(sample_weight)], n_dim ) else: percentile_value = np.array([ array[np.flatnonzero(sample_weight[:, col])[0], col] if n_nonzero == 1 else percentile_value[col] for col, n_nonzero in enumerate(single_sample_weight) ]) return percentile_value ","def _weighted_percentile(array, sample_weight, percentile=50, interpolation=""nearest""): """"""Compute weighted percentile Computes lower weighted percentile. If `array` is a 2D array, the `percentile` is computed along the axis 0. .. versionchanged:: 0.24 Accepts 2D `array`. Parameters ---------- array : ndarray of shape (n,) or (n, m) Values to take the weighted percentile of. sample_weight: ndarray of (n,) or (n, m) Weights for each value in `array`. Must be same shape as `array` or of shape `(array.shape[0],)`. percentile: inr or float, default=50 Percentile to compute. Must be value between 0 and 100. interpolation : {""linear"", ""lower"", ""higher"", ""nearest""}, default=""lower"" The interpolation method to use when the percentile lies between data points `i` and `j`: * `""linear""`: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`; * `""lower""`: i`; * `""higher""`: `j`; * `""nearest""`: `i` or `j`, whichever is nearest (default). .. versionadded: 0.24 Returns ------- percentile_value : float or int if `array` of shape (n,), otherwise\ ndarray of shape (m,) Weighted percentile. 
"""""" possible_interpolation = (""linear"", ""lower"", ""higher"", ""nearest"") if interpolation not in possible_interpolation: raise ValueError( f""'interpolation' should be one of "" f""{', '.join(possible_interpolation)}. Got '{interpolation}' "" f""instead."" ) if np.any(np.count_nonzero(sample_weight, axis=0) < 1): raise ValueError( ""All weights cannot be null when computing a weighted percentile."" ) n_dim = array.ndim if n_dim == 0: return array[()] if array.ndim == 1: array = array.reshape((-1, 1)) if (array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]): # when `sample_weight` is 1D, we repeat it for each column of `array` sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T n_rows, n_cols = array.shape sorted_idx = np.argsort(array, axis=0) sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0) percentile = np.array([percentile / 100] * n_cols) cum_weigths = stable_cumsum(sorted_weights, axis=0) def _squeeze_arr(arr, n_dim): return arr[0] if n_dim == 1 else arr # Percentile can be computed with 3 different alternatives: # https://en.wikipedia.org/wiki/Percentile # These 3 alternatives depend of the value of a parameter C. NumPy uses # the variant where C=0 which allows to obtained a strictly monotically # increasing function which is defined as: # P = (x - 1) / (N - 1); x in [1, N] # Weighted percentile change this formula by taking into account the # weights instead of the data frequency. # P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being # the sum of the weights. adjusted_percentile = (cum_weigths - sorted_weights) with np.errstate(invalid=""ignore""): adjusted_percentile /= cum_weigths[-1] - sorted_weights nan_mask = np.isnan(adjusted_percentile) adjusted_percentile[nan_mask] = 1 if interpolation in (""lower"", ""higher"", ""nearest""): percentile_idx = np.array([ np.searchsorted(adjusted_percentile[:, col], percentile[col], side=""left"") for col in range(n_cols) ]) if interpolation == ""lower"" and np.all(percentile < 1): # P = 100 is a corner case for ""lower"" percentile_idx -= 1 elif interpolation == ""nearest"" and np.all(percentile < 1): for col in range(n_cols): error_higher = abs( adjusted_percentile[percentile_idx[col], col] - percentile[col] ) error_lower = abs( adjusted_percentile[percentile_idx[col] - 1, col] - percentile[col] ) if error_higher >= error_lower: percentile_idx[col] -= 1 percentile_idx = np.apply_along_axis( lambda x: np.clip(x, 0, n_rows - 1), axis=0, arr=percentile_idx ) percentile_value = array[ sorted_idx[percentile_idx, np.arange(n_cols)], np.arange(n_cols) ] percentile_value = _squeeze_arr(percentile_value, n_dim) else: # interpolation == ""linear"" percentile_value = np.array([ np.interp( x=percentile[col], xp=adjusted_percentile[:, col], fp=array[sorted_idx[:, col], col], ) for col in range(n_cols) ]) percentile_value = _squeeze_arr(percentile_value, n_dim) single_sample_weight = np.count_nonzero(sample_weight, axis=0) if np.any(single_sample_weight == 1): # edge case where a single weight is non-null in which case the # previous methods will fail if not isinstance(percentile_value, Iterable): percentile_value = _squeeze_arr( array[np.nonzero(sample_weight)], n_dim ) else: percentile_value = np.array([ array[np.flatnonzero(sample_weight[:, col])[0], col] if n_nonzero == 1 else percentile_value[col] for col, n_nonzero in enumerate(single_sample_weight) ]) return percentile_value " 31940,"def test_connection(client: DataExplorerClient) -> str: """""" Test the 
connection with Azure Data Explorer service. Args: client (DataExplorerClient): Azure Data Explorer API client. Returns: str: Message about successfully connected to the Azure Data Explorer. """""" client.ms_client.get_access_token() return 'Success!' ","def test_connection(client: DataExplorerClient) -> str: """""" Test the connection with Azure Data Explorer service. Args: client (DataExplorerClient): Azure Data Explorer API client. Returns: str: Message about successfully connected to the Azure Data Explorer. """""" client.ms_client.get_access_token() return '✅ Success!' " 39178,"def spectral_centroid( waveform: Tensor, sample_rate: int, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, min_freq: Optional[float] = None, max_freq: Optional[float] = None, ) -> Tensor: r"""""" Compute the spectral centroid for each channel along the time axis. The spectral centroid is defined as the weighted average of the frequency values, weighted by their magnitude. Optionally find centroid of a limited range of the spectrum, specified by the optional min_freq and max_dreq arguments. Args: waveform (Tensor): Tensor of audio of dimension `(..., time)` sample_rate (int): Sample rate of the audio waveform pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size min_freq (float, optional): Specify a minimum frequency to include in centroid calculation max_freq (float, optional): Specify a maximum frequency to include in centroid calculation Returns: Tensor: Dimension `(..., time)` """""" nyquist = sample_rate // 2 fft_bins = 1 + n_fft // 2 specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1)) min_freq_index = int(round((min_freq * fft_bins) / nyquist)) if min_freq is not None else 0 max_freq_index = int(round((max_freq * fft_bins) / nyquist)) if max_freq is not None else fft_bins if min_freq is not None or max_freq is not None: assert min_freq_index < max_freq_index specgram = specgram[...,min_freq_index:max_freq_index,:] freqs = freqs[...,min_freq_index:max_freq_index,:] freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) ","def spectral_centroid( waveform: Tensor, sample_rate: int, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, min_freq: Optional[float] = None, max_freq: Optional[float] = None, ) -> Tensor: r"""""" Compute the spectral centroid for each channel along the time axis. The spectral centroid is defined as the weighted average of the frequency values, weighted by their magnitude. Optionally find centroid of a limited range of the spectrum, specified by the optional min_freq and max_dreq arguments. 
Args: waveform (Tensor): Tensor of audio of dimension `(..., time)` sample_rate (int): Sample rate of the audio waveform pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size min_freq (float, optional): Specify a minimum frequency to include in centroid calculation max_freq (float, optional): Specify a maximum frequency to include in centroid calculation Returns: Tensor: Dimension `(..., time)` """""" nyquist = sample_rate // 2 fft_bins = 1 + n_fft // 2 specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, nyquist, steps=fft_bins, device=specgram.device).reshape((-1, 1)) min_freq_index = int(round((min_freq * fft_bins) / nyquist)) if min_freq is not None else 0 max_freq_index = int(round((max_freq * fft_bins) / nyquist)) if max_freq is not None else fft_bins if min_freq is not None or max_freq is not None: assert min_freq_index < max_freq_index specgram = specgram[...,min_freq_index:max_freq_index,:] freqs = freqs[...,min_freq_index:max_freq_index,:] freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) " 43122,"def exit_with_error(): print('Usage: please provide a list of at least two integers to sort in the format “1, 2, 3, 4, 5”') sys.exit(1) ","def exit_with_error(): print('Usage: please provide a list of sorted integers (""1, 4, 5, 11, 12"") and the integer to find (""11"")') sys.exit(1) " 45339,"def export_config_help(filename: str): """""" Export all configs help messages to the CSV file. Parameters ---------- filename : str Name of the file to export configs data. """""" configs = pandas.DataFrame( columns=[ ""Config Name"", ""Env. Variable Name"", ""Default Value"", ""Description"", ""Options"", ] ) for objname in sorted(globals()): obj = globals()[objname] if isinstance(obj, type) and issubclass(obj, Parameter) and not obj.is_abstract: data = { ""Config Name"": obj.__name__, ""Env. Variable Name"": getattr(obj, ""varname"", None), ""Default Value"": obj._get_default() if obj.__name__ != ""RayRedisPassword"" else ""random string"", ""Description"": obj.__doc__, ""Options"": obj.choices, } configs = configs.append(data, ignore_index=True) configs.to_csv(filename, index=False) ","def export_config_help(filename: str): """""" Export all configs help messages to the CSV file. Parameters ---------- filename : str Name of the file to export configs data. """""" configs = pandas.DataFrame( columns=[ ""Config Name"", ""Env. Variable Name"", ""Default Value"", ""Description"", ""Options"", ] ) for objname in sorted(globals()): obj = globals()[objname] if isinstance(obj, type) and issubclass(obj, Parameter) and not obj.is_abstract: data = { ""Config Name"": obj.__name__, ""Env. 
Variable Name"": getattr(obj, ""varname"", ""not backed by environment""), ""Default Value"": obj._get_default() if obj.__name__ != ""RayRedisPassword"" else ""random string"", ""Description"": obj.__doc__, ""Options"": obj.choices, } configs = configs.append(data, ignore_index=True) configs.to_csv(filename, index=False) " 30276,"def get_dkim(auth): dkim_context = {} if auth is not None: result = re.search(r""dkim=(\w+)"", auth) if result is not None: dkim_context[""Validation-Result""] = result.group(1).lower() reason = re.search(""dkim=\w+ [(](.+?)[)]"", auth) if reason is not None: dkim_context[""Reason""] = reason.group(1) domain = re.findall(""dkim=[\w\W]+?[=@](\w+\.[^ ]+)"", auth) if domain != []: dkim_context[""Signing-Domain""] = domain[0] return dkim_context ","def get_dkim(auth): dkim_context = {} if auth is not None: result = re.search(r""dkim=(\w+)"", auth) if result is not None: dkim_context[""Validation-Result""] = result.group(1).lower() reason = re.search(""dkim=\w+ \((.+?)\)"", auth) if reason is not None: dkim_context[""Reason""] = reason.group(1) domain = re.findall(""dkim=[\w\W]+?[=@](\w+\.[^ ]+)"", auth) if domain != []: dkim_context[""Signing-Domain""] = domain[0] return dkim_context " 40116,"def _add_firmware_only_fields(fo, meta): ''' Adds fields relevant for FirmwareObjects from fo to meta :param meta: The dictionary to add the fields to :param fo: A FimwareObject ''' if isinstance(fo, Firmware): fo.root_uid = fo.uid meta['device_name'] = fo.device_name meta['device_class'] = fo.device_class meta['device_part'] = fo.part meta['vendor'] = fo.vendor meta['version'] = fo.version meta['release_date'] = fo.release_date ","def _add_firmware_only_fields(fo, meta): ''' Adds fields relevant for `Firmware` objects from `fo` to `meta` :param meta: The dictionary to add the fields to :param fo: A FimwareObject ''' if isinstance(fo, Firmware): fo.root_uid = fo.uid meta['device_name'] = fo.device_name meta['device_class'] = fo.device_class meta['device_part'] = fo.part meta['vendor'] = fo.vendor meta['version'] = fo.version meta['release_date'] = fo.release_date " 35575,"def cmake_workflows(indentation=6): jobs = [] python_version = '3.8' for os_type in ['linux', 'windows', 'macos']: # Right now CMake builds are failling on Windows (GPU) device_types = ['cpu', 'gpu'] if os_type != 'macos' else ['cpu'] for device in device_types: job = { 'name': f'cmake_{os_type}_{device}', 'python_version': python_version } job['cu_version'] = 'cu101' if device == 'gpu' else 'cpu' if device == 'gpu' and os_type == 'linux': job['wheel_docker_image'] = 'pytorch/manylinux-cuda101' jobs.append({f'cmake_{os_type}_{device}': job}) return indent(indentation, jobs) ","def cmake_workflows(indentation=6): jobs = [] python_version = '3.8' for os_type in ['linux', 'windows', 'macos']: # Skip OSX CUDA device_types = ['cpu', 'gpu'] if os_type != 'macos' else ['cpu'] for device in device_types: job = { 'name': f'cmake_{os_type}_{device}', 'python_version': python_version } job['cu_version'] = 'cu101' if device == 'gpu' else 'cpu' if device == 'gpu' and os_type == 'linux': job['wheel_docker_image'] = 'pytorch/manylinux-cuda101' jobs.append({f'cmake_{os_type}_{device}': job}) return indent(indentation, jobs) " 12474,"def console_entry() -> None: try: main(None, sys.stdout, sys.stderr) sys.stdout.flush() sys.stderr.flush() except BrokenPipeError: # Python flushes standard streams on exit; redirect remaining output # to devnull to avoid another BrokenPipeError at shutdown devnull = os.open(os.devnull, os.O_WRONLY) 
os.dup2(devnull, sys.stdout.fileno()) sys.exit(2) except KeyboardInterrupt: _, options = process_options(args=sys.argv[1:]) if options.show_traceback: sys.stdout.write(traceback.format_exc()) formatter = FancyFormatter(sys.stdout, sys.stderr, False) msg = "" KeybordInterrupt called by user. Abort!\n"" sys.stdout.write(formatter.style(msg, color=""red"", bold=True)) sys.stdout.flush() sys.stderr.flush() sys.exit(2) ","def console_entry() -> None: try: main(None, sys.stdout, sys.stderr) sys.stdout.flush() sys.stderr.flush() except BrokenPipeError: # Python flushes standard streams on exit; redirect remaining output # to devnull to avoid another BrokenPipeError at shutdown devnull = os.open(os.devnull, os.O_WRONLY) os.dup2(devnull, sys.stdout.fileno()) sys.exit(2) except KeyboardInterrupt: _, options = process_options(args=sys.argv[1:]) if options.show_traceback: sys.stdout.write(traceback.format_exc()) formatter = FancyFormatter(sys.stdout, sys.stderr, False) msg = ""Interrupted\n"" sys.stdout.write(formatter.style(msg, color=""red"", bold=True)) sys.stdout.flush() sys.stderr.flush() sys.exit(2) " 38497,"def compute_well_rock_matrix_intersections( gb: pp.GridBucket, cells: np.ndarray = None, tol: float = 1e-5 ): """"""Compute intersections and add edge coupling between the well and the rock matrix. To be called after the wells grids are constructed. We are assuming convex cells and one single high dimensional grid. Parameters: gb (pp.GridBucket): the grid bucket containing all the elements cells (np.ndarray, optional): a set of cells that might be considered to construct the tree. If it is not given the tree is constructed by using all the higher dimensional grid cells tol (float, optional): geometric tolerance """""" # Extract the dimension of the rock matrix, assumed to be of highest dimension dim_max: int = gb.dim_max() # We assume only one single higher dimensional grid, needed for the ADTree g_max: pp.Grid = gb.grids_of_dimension(dim_max)[0] # Construct an ADTree for fast computation tree = pp.adtree.ADTree(2 * g_max.dim, g_max.dim) tree.from_grid(g_max, cells) # Extract the grids of the wells of co-dimension 2 gs_w = gb.grids_of_dimension(dim_max - 2) # Pre-compute some well informations nodes_w = np.empty(gs_w.size, dtype=object) for idw, g_w in enumerate(gs_w): g_w_cn = g_w.cell_nodes() g_w_cells = np.arange(g_w.num_cells) # get the cells of the 0d as segments (start, end) first = g_w_cn.indptr[g_w_cells] second = g_w_cn.indptr[g_w_cells + 1] nodes_w[idw] = ( g_w_cn.indices[pp.utils.mcolon.mcolon(first, second)].reshape((-1, 2)).T ) # Operate on the rock matrix grid (faces, cells, _) = sps.find(g_max.cell_faces) faces = faces[np.argsort(cells)] nodes, _, _ = sps.find(g_max.face_nodes) indptr = g_max.face_nodes.indptr # Loop on all the well grids for g_w, n_w in zip(gs_w, nodes_w): # extract the start and end point of the segments start = g_w.nodes[:, n_w[0]] end = g_w.nodes[:, n_w[1]] # Lists for the cell_cell_map primary_to_mortar_I, primary_to_mortar_J, primary_to_mortar_data = [], [], [] # Operate on the segments for seg_id, (seg_start, seg_end) in enumerate(zip(start.T, end.T)): # Create the box for the segment by ordering its start and end box = np.sort(np.vstack((seg_start, seg_end)), axis=0).flatten() seg_cells = tree.search(pp.adtree.ADTNode(""dummy_node"", box)) # Loop on all the higher dimensional cells for c in seg_cells: # For the current cell retrieve its faces loc = slice(g_max.cell_faces.indptr[c], g_max.cell_faces.indptr[c + 1]) faces_loc = faces[loc] # Get the local 
nodes, face based poly = np.array( [ g_max.nodes[:, nodes[indptr[f] : indptr[f + 1]]] for f in faces_loc ] ) # Compute the intersections between the segment and the current higher # dimensional cell ratio = pp.intersections.segments_polyhedron( seg_start, seg_end, poly, tol ) # Store the requested information to build the projection operator if ratio > 0: primary_to_mortar_I += [seg_id] primary_to_mortar_J += [c] primary_to_mortar_data += ratio.tolist() primary_to_mortar_int = sps.csc_matrix( (primary_to_mortar_data, (primary_to_mortar_I, primary_to_mortar_J)), shape=(g_w.num_cells, g_max.num_cells), ) secondary_to_mortar_int = sps.diags(np.ones(g_w.num_cells), format=""csc"") # create the mortar grid and set the maps side_g = {pp.grids.mortar_grid.MortarSides.LEFT_SIDE: g_w.copy()} mg = pp.MortarGrid(g_w.dim, side_g, codim=g_max.dim - g_w.dim) mg.set_projection_to_mortar_int(primary_to_mortar_int, secondary_to_mortar_int) mg.compute_geometry() # add a new edge to the grid bucket gb.add_edge((g_max, g_w), mg._primary_to_mortar_int) d_e = gb.edge_props((g_max, g_w)) d_e[""mortar_grid""] = mg # Update the node number gb.assign_node_ordering() ","def compute_well_rock_matrix_intersections( gb: pp.GridBucket, cells: np.ndarray = None, tol: float = 1e-5 ): """"""Compute intersections and add edge coupling between the well and the rock matrix. To be called after the well grids are constructed. We are assuming convex cells and one single high dimensional grid. Parameters: gb (pp.GridBucket): the grid bucket containing all the elements cells (np.ndarray, optional): a set of cells that might be considered to construct the tree. If it is not given the tree is constructed by using all the higher dimensional grid cells tol (float, optional): geometric tolerance """""" # Extract the dimension of the rock matrix, assumed to be of highest dimension dim_max: int = gb.dim_max() # We assume only one single higher dimensional grid, needed for the ADTree g_max: pp.Grid = gb.grids_of_dimension(dim_max)[0] # Construct an ADTree for fast computation tree = pp.adtree.ADTree(2 * g_max.dim, g_max.dim) tree.from_grid(g_max, cells) # Extract the grids of the wells of co-dimension 2 gs_w = gb.grids_of_dimension(dim_max - 2) # Pre-compute some well informations nodes_w = np.empty(gs_w.size, dtype=object) for idw, g_w in enumerate(gs_w): g_w_cn = g_w.cell_nodes() g_w_cells = np.arange(g_w.num_cells) # get the cells of the 0d as segments (start, end) first = g_w_cn.indptr[g_w_cells] second = g_w_cn.indptr[g_w_cells + 1] nodes_w[idw] = ( g_w_cn.indices[pp.utils.mcolon.mcolon(first, second)].reshape((-1, 2)).T ) # Operate on the rock matrix grid (faces, cells, _) = sps.find(g_max.cell_faces) faces = faces[np.argsort(cells)] nodes, _, _ = sps.find(g_max.face_nodes) indptr = g_max.face_nodes.indptr # Loop on all the well grids for g_w, n_w in zip(gs_w, nodes_w): # extract the start and end point of the segments start = g_w.nodes[:, n_w[0]] end = g_w.nodes[:, n_w[1]] # Lists for the cell_cell_map primary_to_mortar_I, primary_to_mortar_J, primary_to_mortar_data = [], [], [] # Operate on the segments for seg_id, (seg_start, seg_end) in enumerate(zip(start.T, end.T)): # Create the box for the segment by ordering its start and end box = np.sort(np.vstack((seg_start, seg_end)), axis=0).flatten() seg_cells = tree.search(pp.adtree.ADTNode(""dummy_node"", box)) # Loop on all the higher dimensional cells for c in seg_cells: # For the current cell retrieve its faces loc = slice(g_max.cell_faces.indptr[c], g_max.cell_faces.indptr[c + 
1]) faces_loc = faces[loc] # Get the local nodes, face based poly = np.array( [ g_max.nodes[:, nodes[indptr[f] : indptr[f + 1]]] for f in faces_loc ] ) # Compute the intersections between the segment and the current higher # dimensional cell ratio = pp.intersections.segments_polyhedron( seg_start, seg_end, poly, tol ) # Store the requested information to build the projection operator if ratio > 0: primary_to_mortar_I += [seg_id] primary_to_mortar_J += [c] primary_to_mortar_data += ratio.tolist() primary_to_mortar_int = sps.csc_matrix( (primary_to_mortar_data, (primary_to_mortar_I, primary_to_mortar_J)), shape=(g_w.num_cells, g_max.num_cells), ) secondary_to_mortar_int = sps.diags(np.ones(g_w.num_cells), format=""csc"") # create the mortar grid and set the maps side_g = {pp.grids.mortar_grid.MortarSides.LEFT_SIDE: g_w.copy()} mg = pp.MortarGrid(g_w.dim, side_g, codim=g_max.dim - g_w.dim) mg.set_projection_to_mortar_int(primary_to_mortar_int, secondary_to_mortar_int) mg.compute_geometry() # add a new edge to the grid bucket gb.add_edge((g_max, g_w), mg._primary_to_mortar_int) d_e = gb.edge_props((g_max, g_w)) d_e[""mortar_grid""] = mg # Update the node number gb.assign_node_ordering() " 2988,"def safe_import(mod_name: str, min_version: Optional[str] = None): """""" Parameters: ----------- mod_name : str Name of the module to be imported min_version : str, default None Minimum required version of the specified mod_name Returns: -------- object The imported module if successful, or False """""" try: mod = __import__(mod_name) except ImportError: return False if not min_version: return mod else: import sys try: version = getattr(sys.modules[mod_name], ""__version__"") except AttributeError: # xlrd uses a capitalized attribute name version = getattr(sys.modules[mod_name], ""__VERSION__"") if version: from distutils.version import LooseVersion if LooseVersion(version) >= LooseVersion(min_version): return mod return False ","def safe_import(mod_name: str, min_version: Optional[str] = None) -> Union[types.ModuleType, bool]: """""" Parameters: ----------- mod_name : str Name of the module to be imported min_version : str, default None Minimum required version of the specified mod_name Returns: -------- object The imported module if successful, or False """""" try: mod = __import__(mod_name) except ImportError: return False if not min_version: return mod else: import sys try: version = getattr(sys.modules[mod_name], ""__version__"") except AttributeError: # xlrd uses a capitalized attribute name version = getattr(sys.modules[mod_name], ""__VERSION__"") if version: from distutils.version import LooseVersion if LooseVersion(version) >= LooseVersion(min_version): return mod return False " 33765,"def wait_for_actor_killed(actor_id, current_num_restarts, timeout=10, retry_interval_ms=100): start = time.time() while time.time() - start <= timeout: actor_status = ray.actors(actor_id) if actor_status[""State""] == ray.gcs_utils.ActorTableData.DEAD \ or actor_status[""NumRestarts""] > current_num_restarts: return time.sleep(retry_interval_ms / 1000.0) raise RuntimeError( ""It took too much time to kill an actor: {}"".format(actor_id)) ","def wait_for_actor_failure(actor_id, current_num_restarts, timeout=10, retry_interval_ms=100): start = time.time() while time.time() - start <= timeout: actor_status = ray.actors(actor_id) if actor_status[""State""] == ray.gcs_utils.ActorTableData.DEAD \ or actor_status[""NumRestarts""] > current_num_restarts: return time.sleep(retry_interval_ms / 1000.0) raise RuntimeError( 
""It took too much time to kill an actor: {}"".format(actor_id)) " 33723,"def get_session(): global _session if not _session and warn: logger.warning( ""Session not detected. You should not be calling this function "" ""outside `tune.run` or while using the class API. "") return _session ","def get_session(): global _session if not _session: logger.warning( ""Session not detected. You should not be calling this function "" ""outside `tune.run` or while using the class API. "") return _session " 40566,"def _check_spring_instance_existed(cmd, client, resource_group, name, location, **_): availability_parameters = models.NameAvailabilityParameters(type=""Microsoft.AppPlatform/Spring"", name=name) name_availability = client.services.check_name_availability(location, availability_parameters) if not name_availability.name_available and name_availability.reason == ""AlreadyExists"": raise ClientRequestError(""Service instance '{}' under resource group '{}' is already existed in region '{}', cannot create again."".format(name, resource_group, location)) ","def _check_spring_instance_existed(cmd, client, resource_group, name, location, **_): availability_parameters = models.NameAvailabilityParameters(type=""Microsoft.AppPlatform/Spring"", name=name) name_availability = client.services.check_name_availability(location, availability_parameters) if not name_availability.name_available and name_availability.reason == ""AlreadyExists"": raise ClientRequestError(""Service instance '{}' under resource group '{}' is already existed in region '{}', cannot be created again."".format(name, resource_group, location)) " 32833,"def activate_distributed_tracing_context(pin, int_config, request_headers): """""" Helper for activating a distributed trace's context if enabled in integration config. """""" int_config = int_config or {} if int_config.get(""distributed_tracing_enabled"", False): propagator = HTTPPropagator() context = propagator.extract(request_headers) # Only need to activate the new context if something was propagated if context.trace_id: pin.tracer.context_provider.activate(context) ","def activate_distributed_headers(int_config, request_headers): """""" Helper for activating a distributed trace's context if enabled in integration config. 
"""""" int_config = int_config or {} if int_config.get(""distributed_tracing_enabled"", False): propagator = HTTPPropagator() context = propagator.extract(request_headers) # Only need to activate the new context if something was propagated if context.trace_id: pin.tracer.context_provider.activate(context) " 11516,"def validate_media_types(wrapped): """"""View decorator to make convert certain 4xx errors to 415s"""""" def wrapper(context, request): # If Accept has been set if request.accept: # At least one of the media types in Accept must be known to the app ok_types = [ media_type for media_type in request.accept if media_type in valid_media_types() ] if not ok_types: # If no Accept media types are known, convert to a 415 error context = HTTPUnsupportedMediaType(""Unsupported media type"") response = wrapped(context, request) return response return wrapper ","def validate_media_types(wrapped): """"""View decorator to convert certain 4xx errors to 415s"""""" def wrapper(context, request): # If Accept has been set if request.accept: # At least one of the media types in Accept must be known to the app ok_types = [ media_type for media_type in request.accept if media_type in valid_media_types() ] if not ok_types: # If no Accept media types are known, convert to a 415 error context = HTTPUnsupportedMediaType(""Unsupported media type"") response = wrapped(context, request) return response return wrapper " 29957,"def test_operator(): func = Function('+') assert func.symbol == '+' assert func.func_type == 'arithmetic' assert func.arity == 2 assert func.__repr__() == '' ","def test_operator(): func = Function('+') assert func.symbol == '+' assert func.func_type == 'arithmetic' assert func.arity == 2 assert repr(func) == '' " 53020,"def _clean_deleted_presets(data): includes = [] for include in data[""include""]: if os.path.exists(include): includes.append(include) data[""include""] = includes return data ","def _clean_deleted_presets(data): data[""include""] = [i for i in data[""include""] if os.path.exists(i)] " 41510,"def tmu(mu, data, pdf, init_pars, par_bounds, fixed_vals): r"""""" The test statistic, :math:`t_{\mu}`, for establishing a two-sided interval on the strength parameter, :math:`\mu`, as defiend in Equation (8) in :xref:`arXiv:1007.1727` .. math:: t_{\mu} = -2\ln\lambda\left(\mu\right) where :math:`\lambda\left(\mu\right)` is the profile likelihood ratio as defined in Equation (7) .. math:: \lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}\,. Example: >>> import pyhf >>> pyhf.set_backend(""numpy"") >>> model = pyhf.simplemodels.hepdata_like( ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] ... 
) >>> observations = [51, 48] >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata) >>> test_mu = 1.0 >>> init_pars = model.config.suggested_init() >>> par_bounds = model.config.suggested_bounds() >>> par_bounds[model.config.poi_index] = [-10.0, 10.0] >>> fixed_vals = [] >>> pyhf.infer.test_statistics.tmu(test_mu, data, model, init_pars, par_bounds, []) array(3.9549891) Args: mu (Number or Tensor): The signal strength parameter data (Tensor): The data to be considered pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json init_pars (`list`): Values to initialize the model parameters at for the fit par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit fixed_vals (`list`): Parameters held constant in the fit Returns: Float: The calculated test statistic, :math:`t_{\mu}` """""" if pdf.config.poi_index is None: raise UnspecifiedPOI( 'No POI is defined. A POI is required for profile likelihood based test statistics.' ) if par_bounds[pdf.config.poi_index][0] == 0: log.warning( 'tmu test statistic used for fit configuration with POI bounded at zero.\n' + 'Use the tmu_tilde test statistic (pyhf.infer.test_statistics.tmu_tilde) instead.' ) return _tmu_like(mu, data, pdf, init_pars, par_bounds, fixed_vals) ","def tmu(mu, data, pdf, init_pars, par_bounds, fixed_vals): r"""""" The test statistic, :math:`t_{\mu}`, for establishing a two-sided interval on the strength parameter, :math:`\mu`, as defiend in Equation (8) in :xref:`arXiv:1007.1727` .. math:: t_{\mu} = -2\ln\lambda\left(\mu\right) where :math:`\lambda\left(\mu\right)` is the profile likelihood ratio as defined in Equation (7) .. math:: \lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}\,. Example: >>> import pyhf >>> pyhf.set_backend(""numpy"") >>> model = pyhf.simplemodels.hepdata_like( ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] ... ) >>> observations = [51, 48] >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata) >>> test_mu = 1.0 >>> init_pars = model.config.suggested_init() >>> par_bounds = model.config.suggested_bounds() >>> par_bounds[model.config.poi_index] = [-10.0, 10.0] >>> fixed_vals = [] >>> pyhf.infer.test_statistics.tmu(test_mu, data, model, init_pars, par_bounds, fixed_vals) array(3.9549891) Args: mu (Number or Tensor): The signal strength parameter data (Tensor): The data to be considered pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json init_pars (`list`): Values to initialize the model parameters at for the fit par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit fixed_vals (`list`): Parameters held constant in the fit Returns: Float: The calculated test statistic, :math:`t_{\mu}` """""" if pdf.config.poi_index is None: raise UnspecifiedPOI( 'No POI is defined. A POI is required for profile likelihood based test statistics.' ) if par_bounds[pdf.config.poi_index][0] == 0: log.warning( 'tmu test statistic used for fit configuration with POI bounded at zero.\n' + 'Use the tmu_tilde test statistic (pyhf.infer.test_statistics.tmu_tilde) instead.' 
) return _tmu_like(mu, data, pdf, init_pars, par_bounds, fixed_vals) " 7586,"def test_fits_to_string_function_error(): """"""Test if will raise a TypeError if incorrect unit type."""""" with pytest.raises(TypeError): u_format.Fits.to_string(None) ","def test_fits_to_string_function_error(): """"""Test if will raise a TypeError if incorrect unit type."""""" with pytest.raises(TypeError, match='Incorrect type'): u_format.Fits.to_string(None) " 41727,"def train(*args, **kwargs): # type: (List[Any], Optional[Dict[Any, Any]]) -> Any """"""Wrapper of LightGBM Training API to tune hyperparameters. .. warning:: This feature is experimental. The interface can change in the future. It tunes important hyperparameters (e.g., `min_child_samples` and `feature_fraction`) in a stepwise manner. Arguments and keyword arguments for `lightgbm.train() `_ can be passed. """""" auto_booster = LightGBMTuner(*args, **kwargs) booster = auto_booster.run() return booster ","def train(*args, **kwargs): # type: (List[Any], Optional[Dict[Any, Any]]) -> Any """"""Wrapper of LightGBM Training API to tune hyperparameters. .. warning:: This feature is experimental. The interface can be changed in the future. It tunes important hyperparameters (e.g., `min_child_samples` and `feature_fraction`) in a stepwise manner. Arguments and keyword arguments for `lightgbm.train() `_ can be passed. """""" auto_booster = LightGBMTuner(*args, **kwargs) booster = auto_booster.run() return booster " 51053,"def _normalize_states( hass: HomeAssistant, entity_history: list[State], device_class: str | None, entity_id: str, ) -> tuple[str | None, list[tuple[float, State]]]: """"""Normalize units."""""" unit = None if device_class not in UNIT_CONVERSIONS: # We're not normalizing this device class, return the state as they are fstates = [ (float(el.state), el) for el in entity_history if _is_number(el.state) ] if fstates: all_units = _get_units(fstates) if len(all_units) > 1: if WARN_UNSTABLE_UNIT not in hass.data: hass.data[WARN_UNSTABLE_UNIT] = set() if entity_id not in hass.data[WARN_UNSTABLE_UNIT]: hass.data[WARN_UNSTABLE_UNIT].add(entity_id) extra = """" if old_metadata := statistics.get_metadata(hass, entity_id): extra = f"" and matches the unit of already compiled statistics {old_metadata['unit_of_measurement']}"" _LOGGER.warning( ""The unit of %s is changing, got multiple%s, generation of long term "" ""statistics will be suppressed unless the unit is stable%s"", entity_id, all_units, extra, ) return None, [] unit = fstates[0][1].attributes.get(ATTR_UNIT_OF_MEASUREMENT) return unit, fstates fstates = [] for state in entity_history: # Exclude non numerical states from statistics if not _is_number(state.state): continue fstate = float(state.state) unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) # Exclude unsupported units from statistics if unit not in UNIT_CONVERSIONS[device_class]: if WARN_UNSUPPORTED_UNIT not in hass.data: hass.data[WARN_UNSUPPORTED_UNIT] = set() if entity_id not in hass.data[WARN_UNSUPPORTED_UNIT]: hass.data[WARN_UNSUPPORTED_UNIT].add(entity_id) _LOGGER.warning(""%s has unknown unit %s"", entity_id, unit) continue fstates.append((UNIT_CONVERSIONS[device_class][unit](fstate), state)) return DEVICE_CLASS_UNITS[device_class], fstates ","def _normalize_states( hass: HomeAssistant, entity_history: list[State], device_class: str | None, entity_id: str, ) -> tuple[str | None, list[tuple[float, State]]]: """"""Normalize units."""""" unit = None if device_class not in UNIT_CONVERSIONS: # We're not normalizing this device 
class, return the state as they are fstates = [ (float(el.state), el) for el in entity_history if _is_number(el.state) ] if fstates: all_units = _get_units(fstates) if len(all_units) > 1: if WARN_UNSTABLE_UNIT not in hass.data: hass.data[WARN_UNSTABLE_UNIT] = set() if entity_id not in hass.data[WARN_UNSTABLE_UNIT]: hass.data[WARN_UNSTABLE_UNIT].add(entity_id) extra = """" if old_metadata := statistics.get_metadata(hass, entity_id): extra = f"" and matches the unit of already compiled statistics {old_metadata['unit_of_measurement']}"" _LOGGER.warning( ""The unit of %s is changing, got multiple %s, generation of long term "" ""statistics will be suppressed unless the unit is stable %s"", entity_id, all_units, extra, ) return None, [] unit = fstates[0][1].attributes.get(ATTR_UNIT_OF_MEASUREMENT) return unit, fstates fstates = [] for state in entity_history: # Exclude non numerical states from statistics if not _is_number(state.state): continue fstate = float(state.state) unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) # Exclude unsupported units from statistics if unit not in UNIT_CONVERSIONS[device_class]: if WARN_UNSUPPORTED_UNIT not in hass.data: hass.data[WARN_UNSUPPORTED_UNIT] = set() if entity_id not in hass.data[WARN_UNSUPPORTED_UNIT]: hass.data[WARN_UNSUPPORTED_UNIT].add(entity_id) _LOGGER.warning(""%s has unknown unit %s"", entity_id, unit) continue fstates.append((UNIT_CONVERSIONS[device_class][unit](fstate), state)) return DEVICE_CLASS_UNITS[device_class], fstates " 40560,"def load_arguments(self, _): from azure.cli.core.commands.parameters import tags_type from azure.cli.core.commands.validators import get_default_location_from_resource_group with self.argument_context('containerapp compose') as c: c.argument('tags', tags_type) c.argument('location', validator=get_default_location_from_resource_group) c.argument('managed_env', options_list=['--environment', '-e'], help=""Name of the containerapp's environment."") with self.argument_context('containerapp compose create') as c: c.argument('compose_file_path', options_list=['--compose-file-path', '-f'], help='Path to a Docker Compose file with the configuration to import to Azure Containerapps.') c.argument('logs_workspace_name', options_list=['--logs-workspace', '-w'], help='Log analytics workspace name.') ","def load_arguments(self, _): from azure.cli.core.commands.parameters import tags_type from azure.cli.core.commands.validators import get_default_location_from_resource_group with self.argument_context('containerapp compose') as c: c.argument('tags', tags_type) c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('managed_env', options_list=['--environment', '-e'], help=""Name of the containerapp's environment."") with self.argument_context('containerapp compose create') as c: c.argument('compose_file_path', options_list=['--compose-file-path', '-f'], help='Path to a Docker Compose file with the configuration to import to Azure Containerapps.') c.argument('logs_workspace_name', options_list=['--logs-workspace', '-w'], help='Log analytics workspace name.') " 24074,"def upgradeConfigFrom_1_to_2(profile): # Schema has been modified to split cursor shape into focus and review shapes # Previously, the same cursor shape was used for focus and review try: cursorShape = int(profile[""braille""][""cursorShape""]) except KeyError as e: # Setting does not exist, no need for upgrade of this setting log.debug(""No cursorShape, no action taken."") pass else: del 
profile[""braille""][""cursorShape""] profile[""braille""][""cursorShapeFocus""] = cursorShape ","def upgradeConfigFrom_5_to_6(profile: Dict): del profile[""braille""][""cursorShape""] profile[""braille""][""cursorShapeFocus""] = cursorShape " 15951,"def get_api(hass: HomeAssistant, config_data: Mapping) -> AbstractGateApi: """"""Get an api object for config data."""""" gate_class = GogoGate2Api if config_data[CONF_DEVICE] == DEVICE_TYPE_ISMARTGATE: gate_class = ISmartGateApi return gate_class( config_data[CONF_IP_ADDRESS], config_data[CONF_USERNAME], config_data[CONF_PASSWORD], httpx_async_client=get_async_client(hass), ) ","def get_api(hass: HomeAssistant, config_data: Mapping[str, Any]) -> AbstractGateApi: """"""Get an api object for config data."""""" gate_class = GogoGate2Api if config_data[CONF_DEVICE] == DEVICE_TYPE_ISMARTGATE: gate_class = ISmartGateApi return gate_class( config_data[CONF_IP_ADDRESS], config_data[CONF_USERNAME], config_data[CONF_PASSWORD], httpx_async_client=get_async_client(hass), ) " 34262,"def validate_files(args): """"""Validate all files needed for training a model. Fails with a non-zero exit code if there are any errors in the data."""""" from rasa.core.validator import Validator from rasa.importers.rasa import RasaFileImporter loop = asyncio.get_event_loop() file_importer = RasaFileImporter( domain_path=args.domain, training_data_paths=args.data ) validator = loop.run_until_complete(Validator.from_importer(file_importer)) domain_is_alright = validator.verify_domain() if not domain_is_alright: sys.exit(1) everything_is_alright = validator.verify_all(not args.fail_on_warnings) sys.exit(0) if everything_is_alright else sys.exit(1) ","def validate_files(args): """"""Validate all files needed for training a model. Fails with a non-zero exit code if there are any errors in the data."""""" from rasa.core.validator import Validator from rasa.importers.rasa import RasaFileImporter loop = asyncio.get_event_loop() file_importer = RasaFileImporter( domain_path=args.domain, training_data_paths=args.data ) validator = loop.run_until_complete(Validator.from_importer(file_importer)) domain_is_valid = validator.verify_domain() if not domain_is_alright: sys.exit(1) everything_is_alright = validator.verify_all(not args.fail_on_warnings) sys.exit(0) if everything_is_alright else sys.exit(1) " 40417,"def _separate( key: str, value: Any, idx: int, slices: Any, incs: Any, batch: BaseData, store: BaseStorage, decrement: bool, ) -> Any: if isinstance(value, Tensor): # Narrow a `torch.Tensor` based on `slices`. # NOTE: We need to take care of decrementing elements appropriately. cat_dim = batch.__cat_dim__(key, value, store) start, end = int(slices[idx]), int(slices[idx + 1]) value = value.narrow(cat_dim or 0, start, end - start) value = value.squeeze(0) if cat_dim is None else value if decrement and (incs.dim() > 1 or int(incs[idx]) != 0): value = value - incs[idx].to(value.device) return value elif isinstance(value, SparseTensor) and decrement: # Narrow a `SparseTensor` based on `slices`. # NOTE: `cat_dim` may return a tuple to allow for diagonal stacking. cat_dim = batch.__cat_dim__(key, value, store) cat_dims = (cat_dim, ) if isinstance(cat_dim, int) else cat_dim for i, dim in enumerate(cat_dims): start, end = int(slices[idx][i]), int(slices[idx + 1][i]) value = value.narrow(dim, start, end - start) return value elif isinstance(value, Mapping): # Recursively separate elements of dictionaries. 
return { key: _separate(key, elem, idx, slices[key], incs[key] if decrement else None, batch, store, decrement) for key, elem in value.items() } elif (isinstance(value, Sequence) and isinstance(value[0], Sequence) and not isinstance(value[0], str) and len(value[0]) > 0 and isinstance(value[0][0], (Tensor, SparseTensor))): # Recursively separate elements of lists of lists. return [elem[idx] for i, elem in enumerate(value)] elif (isinstance(value, Sequence) and not isinstance(value, str) and isinstance(value[0], (Tensor, SparseTensor))): # Recursively separate elements of lists of Tensors/SparseTensors. return [ _separate(key, elem, idx, slices[i], incs[i] if decrement else None, batch, store, decrement) for i, elem in enumerate(value) ] else: return value[idx] ","def _separate( key: str, value: Any, idx: int, slices: Any, incs: Any, batch: BaseData, store: BaseStorage, decrement: bool, ) -> Any: if isinstance(value, Tensor): # Narrow a `torch.Tensor` based on `slices`. # NOTE: We need to take care of decrementing elements appropriately. cat_dim = batch.__cat_dim__(key, value, store) start, end = int(slices[idx]), int(slices[idx + 1]) value = value.narrow(cat_dim or 0, start, end - start) value = value.squeeze(0) if cat_dim is None else value if decrement and (incs.dim() > 1 or int(incs[idx]) != 0): value = value - incs[idx].to(value.device) return value elif isinstance(value, SparseTensor) and decrement: # Narrow a `SparseTensor` based on `slices`. # NOTE: `cat_dim` may return a tuple to allow for diagonal stacking. cat_dim = batch.__cat_dim__(key, value, store) cat_dims = (cat_dim, ) if isinstance(cat_dim, int) else cat_dim for i, dim in enumerate(cat_dims): start, end = int(slices[idx][i]), int(slices[idx + 1][i]) value = value.narrow(dim, start, end - start) return value elif isinstance(value, Mapping): # Recursively separate elements of dictionaries. return { key: _separate(key, elem, idx, slices[key], incs[key] if decrement else None, batch, store, decrement) for key, elem in value.items() } elif (isinstance(value, Sequence) and isinstance(value[0], Sequence) and not isinstance(value[0], str) and len(value[0]) > 0 and isinstance(value[0][0], (Tensor, SparseTensor))): # Recursively separate elements of lists of lists. return [elem[idx] for elem in value] elif (isinstance(value, Sequence) and not isinstance(value, str) and isinstance(value[0], (Tensor, SparseTensor))): # Recursively separate elements of lists of Tensors/SparseTensors. 
return [ _separate(key, elem, idx, slices[i], incs[i] if decrement else None, batch, store, decrement) for i, elem in enumerate(value) ] else: return value[idx] " 31372,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" params = demisto.params() api_key = params.get('token') base_url = 'https://host.io/api' verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) try: headers = { 'Authorization': f'Bearer {api_key}' } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy) if demisto.command() == 'test-module': result = test_module_command(client) return_results(result) elif demisto.command() == 'domain': return_results(domain_command(client, demisto.args())) elif demisto.command() == 'hostio-domain-search': return_results(search_command(client, demisto.args())) except Exception as e: demisto.error(traceback.format_exc()) return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" params = demisto.params() api_key = params.get('token') base_url = 'https://host.io/api' verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) try: headers = { 'Authorization': f'Bearer {api_key}' } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy,) if demisto.command() == 'test-module': result = test_module_command(client) return_results(result) elif demisto.command() == 'domain': return_results(domain_command(client, demisto.args())) elif demisto.command() == 'hostio-domain-search': return_results(search_command(client, demisto.args())) except Exception as e: demisto.error(traceback.format_exc()) return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 40425,"def test_graph_store_conversion(): graph_store = MyGraphStore() edge_index = get_edge_index(100, 100, 300) edge_index = sort_edge_index(edge_index, sort_by_row=False) adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) coo = (edge_index[0], edge_index[1]) csr = adj.csr()[:2] csc = adj.csc()[-2::-1] # Put all edge indices: graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'), layout='coo', num_nodes=(100, 100), is_sorted=True) graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'), layout='csr', num_nodes=(100, 100)) graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'), layout='csc', num_nodes=(100, 100)) def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor): assert torch.equal(sort_edge_index(expected), sort_edge_index(actual)) # Convert to COO: row_dict, col_dict, perm_dict = graph_store.coo() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict.keys(): actual = torch.stack((row_dict[key], col_dict[key])) assert_edge_index_equal(actual, edge_index) assert perm_dict[key] is None # Convert to CSR: row_dict, col_dict, perm_dict = graph_store.csr() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csr[0]) assert torch.equal(col_dict[key], csr[1]) if key == ('v', '1', 'v'): assert perm_dict[key] is not None # Convert to CSC: row_dict, col_dict, perm_dict = graph_store.csc() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csc[0]) assert 
torch.equal(col_dict[key], csc[1]) assert perm_dict[key] is None ","def test_graph_store_conversion(): graph_store = MyGraphStore() edge_index = get_edge_index(100, 100, 300) edge_index = sort_edge_index(edge_index, sort_by_row=False) adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) coo = (edge_index[0], edge_index[1]) csr = adj.csr()[:2] csc = adj.csc()[-2::-1] # Put all edge indices: graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'), layout='coo', num_nodes=(100, 100), is_sorted=True) graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'), layout='csr', num_nodes=(100, 100)) graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'), layout='csc', num_nodes=(100, 100)) def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor): assert torch.equal(sort_edge_index(expected), sort_edge_index(actual)) # Convert to COO: row_dict, col_dict, perm_dict = graph_store.coo() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict.keys(): actual = torch.stack((row_dict[key], col_dict[key])) assert_edge_index_equal(actual, edge_index) assert perm_dict[key] is None # Convert to CSR: rowptr_dict, col_dict, perm_dict = graph_store.csr() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csr[0]) assert torch.equal(col_dict[key], csr[1]) if key == ('v', '1', 'v'): assert perm_dict[key] is not None # Convert to CSC: row_dict, col_dict, perm_dict = graph_store.csc() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csc[0]) assert torch.equal(col_dict[key], csc[1]) assert perm_dict[key] is None " 30873,"def get_alert_details(): """""" Retrieve alert details by given ID """""" response = req('GET', 'alert/' + demisto.getArg('alert-id'), None, None) # {'detailed': demisto.getArg('detailed')}) alert = alert_to_readable(response) alert.update({ 'PolicyID': demisto.get(response, 'policy.policyID'), 'PolicySystemDefault': demisto.get(response, 'policy.systemDefault'), 'PolicyLabels': demisto.get(response, 'policy.labels'), 'PolicyLastModifiedOn': demisto.get(response, 'policy.lastModifiedOn'), 'PolicyLastModifiedBy': demisto.get(response, 'policy.lastModifiedBy'), 'RiskScore': demisto.get(response, 'riskDetail.riskScore.score'), 'ResourceRRN': demisto.get(response, 'resource.rrn'), 'ResourceID': demisto.get(response, 'resource.id'), 'ResourceAccountID': demisto.get(response, 'resource.accountId'), 'ResourceRegionID': demisto.get(response, 'resource.regionId'), 'ResourceApiName': demisto.get(response, 'resource.resourceApiName'), 'ResourceUrl': demisto.get(response, 'resource.url'), 'ResourceData': demisto.get(response, 'resource.data'), 'ResourceAccessKeyAge': demisto.get(response, 'resource.additionalInfo.accessKeyAge'), 'ResourceInactiveSinceTs': demisto.get(response, 'resource.additionalInfo.inactiveSinceTs') }) ec = alert_to_context(response) ec['AlertRules'] = [alert_rule.get('name') for alert_rule in response.get('alertRules')] context = {'Redlock.Alert(val.ID === obj.ID)': ec} demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': response, 'EntryContext': context, 'HumanReadable': tableToMarkdown('Alert', alert, removeNull=True) }) ","def get_alert_details(): """""" Retrieve alert details by given ID """""" response = req('GET', 'alert/' + demisto.getArg('alert-id'), None, None) # {'detailed': demisto.getArg('detailed')}) alert = 
alert_to_readable(response) alert.update({ 'PolicyID': demisto.get(response, 'policy.policyID'), 'PolicySystemDefault': demisto.get(response, 'policy.systemDefault'), 'PolicyLabels': demisto.get(response, 'policy.labels'), 'PolicyLastModifiedOn': demisto.get(response, 'policy.lastModifiedOn'), 'PolicyLastModifiedBy': demisto.get(response, 'policy.lastModifiedBy'), 'RiskScore': demisto.get(response, 'riskDetail.riskScore.score'), 'ResourceRRN': demisto.get(response, 'resource.rrn'), 'ResourceID': demisto.get(response, 'resource.id'), 'ResourceAccountID': demisto.get(response, 'resource.accountId'), 'ResourceRegionID': demisto.get(response, 'resource.regionId'), 'ResourceApiName': demisto.get(response, 'resource.resourceApiName'), 'ResourceUrl': demisto.get(response, 'resource.url'), 'ResourceData': demisto.get(response, 'resource.data'), 'ResourceAccessKeyAge': demisto.get(response, 'resource.additionalInfo.accessKeyAge'), 'ResourceInactiveSinceTs': demisto.get(response, 'resource.additionalInfo.inactiveSinceTs') }) ec = alert_to_context(response) ec['AlertRules'] = [alert_rule.get('name') for alert_rule in response.get('alertRules', [])] context = {'Redlock.Alert(val.ID === obj.ID)': ec} demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': response, 'EntryContext': context, 'HumanReadable': tableToMarkdown('Alert', alert, removeNull=True) }) " 43922,"def contracted_norm(l, alpha, a): r""""""Compute the normalization constant for a contracted Gaussian function. A contracted Gaussian function is defined as .. math:: \psi = a_1 G_1 + a_2 G_2 + a_3 G_3, where :math:`a` denotes the contraction coefficients and :math:`G` is a Gaussian function. The normalization constant for this function is computed as .. math:: N(l, \alpha, a) = [\frac{\pi^{3/2}(2\l_x-1)!! (2\l_y-1)!! (2\l_z-1)!!}{2^{l_x + l_y + l_z}} \sum_{i,j} \frac{a_i a_j}{(\alpha_i + \alpha_j)^{{l_x + l_y + l_z+3/2}}}]^{-1/2} where :math:`l`, :math:`\alpha` and :math:`a` are the angular momentum quantum numbers, the exponents of the Gaussian functions and the contraction coefficients, respectively. Args: l (tuple[int]): angular momentum quantum numbers of the basis function alpha (array[float]): exponents of the primitive Gaussian functions a (array[float]): coefficients of the contracted Gaussian functions Returns: n (array[float]): normalization coefficient **Example** >>> l = (0, 0, 0) >>> alpha = np.array([3.425250914, 0.6239137298, 0.168855404]) >>> a = np.array([1.79444183, 0.50032649, 0.18773546]) >>> n = contracted_norm(l, alpha, a) >>> print(n) 0.39969026908800853 """""" lx, ly, lz = l c = np.pi ** 1.5 / 2 ** sum(l) * fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1) s = ( (a.reshape(len(a), 1) * a) / ((alpha.reshape(len(alpha), 1) + alpha) ** (sum(l) + 1.5)) ).sum() n = 1 / anp.sqrt(c * s) return n ","def contracted_norm(l, alpha, a): r""""""Compute the normalization constant for a contracted Gaussian function. A contracted Gaussian function is defined as .. math:: \psi = a_1 G_1 + a_2 G_2 + a_3 G_3, where :math:`a` denotes the contraction coefficients and :math:`G` is a primitive Gaussian function. The normalization constant for this function is computed as .. math:: N(l, \alpha, a) = [\frac{\pi^{3/2}(2\l_x-1)!! (2\l_y-1)!! 
(2\l_z-1)!!}{2^{l_x + l_y + l_z}} \sum_{i,j} \frac{a_i a_j}{(\alpha_i + \alpha_j)^{{l_x + l_y + l_z+3/2}}}]^{-1/2} where :math:`l`, :math:`\alpha` and :math:`a` are the angular momentum quantum numbers, the exponents of the Gaussian functions and the contraction coefficients, respectively. Args: l (tuple[int]): angular momentum quantum numbers of the basis function alpha (array[float]): exponents of the primitive Gaussian functions a (array[float]): coefficients of the contracted Gaussian functions Returns: n (array[float]): normalization coefficient **Example** >>> l = (0, 0, 0) >>> alpha = np.array([3.425250914, 0.6239137298, 0.168855404]) >>> a = np.array([1.79444183, 0.50032649, 0.18773546]) >>> n = contracted_norm(l, alpha, a) >>> print(n) 0.39969026908800853 """""" lx, ly, lz = l c = np.pi ** 1.5 / 2 ** sum(l) * fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1) s = ( (a.reshape(len(a), 1) * a) / ((alpha.reshape(len(alpha), 1) + alpha) ** (sum(l) + 1.5)) ).sum() n = 1 / anp.sqrt(c * s) return n " 44500,"def get_parameter_type( type_name: Optional[str]) -> pipeline_spec_pb2.PrimitiveType: """"""Get the IR I/O parameter type for the given ComponentSpec I/O type. Args: type_name: type name of the ComponentSpec I/O type. Returns: The enum value of the mapped IR I/O primitive type. Raises: AttributeError: if type_name os not a string type. """""" return _PARAMETER_TYPES_MAPPING.get(type_name.lower()) ","def get_parameter_type( type_name: Optional[str]) -> pipeline_spec_pb2.PrimitiveType: """"""Get the IR I/O parameter type for the given ComponentSpec I/O type. Args: type_name: type name of the ComponentSpec I/O type. Returns: The enum value of the mapped IR I/O primitive type. Raises: AttributeError: if type_name is not a string type. """""" return _PARAMETER_TYPES_MAPPING.get(type_name.lower()) " 24269,"def test_redis_default(aggregator, redis_auth, redis_instance): db = redis.Redis(port=PORT, db=14, password=PASSWORD, host=HOST) db.flushdb() db.lpush(""test_list"", 1) db.lpush(""test_list"", 2) db.lpush(""test_list"", 3) db.set(""key1"", ""value"") db.set(""key2"", ""value"") db.setex(""expirekey"", 1000, ""expirevalue"") redis_check = Redis('redisdb', {}, [redis_instance]) redis_check.check(redis_instance) # check the aggregator received some metrics assert aggregator.metric_names, ""No metrics returned"" # check those metrics have the right tags expected = ['foo:bar', 'redis_host:{}'.format(HOST), 'redis_port:6379', 'redis_role:master'] expected_db = expected + ['redis_db:db14'] assert aggregator.metric_names for name in aggregator.metric_names: if name in DB_TAGGED_METRICS: aggregator.assert_metric(name, tags=expected_db) elif name != 'redis.key.length' and name != 'redis.net.connections': aggregator.assert_metric(name, tags=expected) aggregator.assert_metric('redis.key.length', 3, count=1, tags=expected_db + ['key:test_list', 'key_type:list']) aggregator.assert_metric('redis.net.connections', count=1, tags=expected + ['source:unknown']) aggregator.assert_metric('redis.net.maxclients') # in the old tests these was explicitly asserted, keeping it like that assert 'redis.net.commands' in aggregator.metric_names version = db.info().get('redis_version') if StrictVersion(version) >= StrictVersion('2.6.0'): # instantaneous_ops_per_sec info is only available on redis>=2.6 assert 'redis.net.instantaneous_ops_per_sec' in aggregator.metric_names db.flushdb() ","def test_redis_default(aggregator, redis_auth, redis_instance): db = redis.Redis(port=PORT, db=14, password=PASSWORD, host=HOST) 
db.flushdb() db.lpush(""test_list"", 1) db.lpush(""test_list"", 2) db.lpush(""test_list"", 3) db.set(""key1"", ""value"") db.set(""key2"", ""value"") db.setex(""expirekey"", 1000, ""expirevalue"") redis_check = Redis('redisdb', {}, [redis_instance]) redis_check.check(redis_instance) # check the aggregator received some metrics assert aggregator.metric_names, ""No metrics returned"" # check those metrics have the right tags expected = ['foo:bar', 'redis_host:{}'.format(HOST), 'redis_port:6379', 'redis_role:master'] expected_db = expected + ['redis_db:db14'] assert aggregator.metric_names for name in aggregator.metric_names: if name in DB_TAGGED_METRICS: aggregator.assert_metric(name, tags=expected_db) elif name not in ('redis.key.length', 'redis.net.connections'): aggregator.assert_metric(name, tags=expected) aggregator.assert_metric('redis.key.length', 3, count=1, tags=expected_db + ['key:test_list', 'key_type:list']) aggregator.assert_metric('redis.net.connections', count=1, tags=expected + ['source:unknown']) aggregator.assert_metric('redis.net.maxclients') # in the old tests these was explicitly asserted, keeping it like that assert 'redis.net.commands' in aggregator.metric_names version = db.info().get('redis_version') if StrictVersion(version) >= StrictVersion('2.6.0'): # instantaneous_ops_per_sec info is only available on redis>=2.6 assert 'redis.net.instantaneous_ops_per_sec' in aggregator.metric_names db.flushdb() " 56344,"def distance_matrix(stream_list, shift_len=0.0, allow_individual_trace_shifts=True, cores=1): """""" Compute distance matrix for waveforms based on cross-correlations. Function to compute the distance matrix for all templates - will give distance as 1-abs(cccoh), e.g. a well correlated pair of templates will have small distances, and an equally well correlated reverse image will have the same distance as a positively correlated image - this is an issue. :type stream_list: list :param stream_list: List of the :class:`obspy.core.stream.Stream` to compute the distance matrix for :type shift_len: float :param shift_len: How many seconds for templates to shift :type allow_individual_trace_shifts: bool :param allow_individual_trace_shifts: Controls whether templates are shifted by shift_len in relation to the picks as a whole, or whether each trace can be shifted individually. Defaults to True. :type cores: int :param cores: Number of cores to parallel process using, defaults to 1. :returns: distance matrix :rtype: :class:`numpy.ndarray` .. warning:: Because distance is given as :math:`1-abs(coherence)`, negatively correlated and positively correlated objects are given the same distance. .. note:: Requires all traces to have the same sampling rate and same length. 
"""""" allow_individual_trace_shifts =\ allow_individual_trace_shifts and shift_len > 0 # Initialize square matrix dist_mat = np.array([np.array([0.0] * len(stream_list))] * len(stream_list)) shift_mat = np.zeros_like(dist_mat) shift_mat = np.zeros([len(stream_list), len(stream_list), max([len(st) for st in stream_list])]) n_shifts_per_stream = 1 for i, master in enumerate(stream_list): dist_list, shift_list = cross_chan_correlation( st1=master, streams=stream_list, shift_len=shift_len, allow_individual_trace_shifts=allow_individual_trace_shifts, xcorr_func='fftw', cores=cores) dist_mat[i] = 1 - dist_list if allow_individual_trace_shifts: n_shifts_per_stream = shift_list.shape[1] shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list else: shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list if shift_len == 0: assert np.allclose(dist_mat, dist_mat.T, atol=0.00001) # Force perfect symmetry dist_mat = (dist_mat + dist_mat.T) / 2 shift_mat = shift_mat[:, :, 0:n_shifts_per_stream].squeeze() else: # get the shortest distance for each correlation pair dist_mat_shortest = np.minimum(dist_mat, dist_mat.T) # Indicator says which matrix has shortest dist: value 0: mat2; 1: mat1 mat_indicator = dist_mat_shortest == dist_mat mat_indicator = np.repeat(mat_indicator[:, :, np.newaxis], n_shifts_per_stream, axis=2)[:, :] # Get shift for the shortest distances shift_mat = shift_mat[:, :, 0:n_shifts_per_stream][:, :] shift_mat = shift_mat * mat_indicator +\ np.transpose(shift_mat, [1, 0, 2]) * (1 - mat_indicator) dist_mat = dist_mat_shortest np.fill_diagonal(dist_mat, 0) return dist_mat, shift_mat.squeeze() ","def distance_matrix(stream_list, shift_len=0.0, allow_individual_trace_shifts=True, cores=1): """""" Compute distance matrix for waveforms based on cross-correlations. Function to compute the distance matrix for all templates - will give distance as 1-abs(cccoh), e.g. a well correlated pair of templates will have small distances, and an equally well correlated reverse image will have the same distance as a positively correlated image - this is an issue. :type stream_list: list :param stream_list: List of the :class:`obspy.core.stream.Stream` to compute the distance matrix for :type shift_len: float :param shift_len: How many seconds for templates to shift :type allow_individual_trace_shifts: bool :param allow_individual_trace_shifts: Controls whether templates are shifted by shift_len in relation to the picks as a whole, or whether each trace can be shifted individually. Defaults to True. :type cores: int :param cores: Number of cores to parallel process using, defaults to 1. :returns: distance matrix :rtype: :class:`numpy.ndarray` .. warning:: Because distance is given as :math:`1-abs(coherence)`, negatively correlated and positively correlated objects are given the same distance. .. note:: Requires all traces to have the same sampling rate and same length. 
"""""" allow_individual_trace_shifts = ( allow_individual_trace_shifts and shift_len > 0) # Initialize square matrix dist_mat = np.array([np.array([0.0] * len(stream_list))] * len(stream_list)) shift_mat = np.zeros_like(dist_mat) shift_mat = np.zeros([len(stream_list), len(stream_list), max([len(st) for st in stream_list])]) n_shifts_per_stream = 1 for i, master in enumerate(stream_list): dist_list, shift_list = cross_chan_correlation( st1=master, streams=stream_list, shift_len=shift_len, allow_individual_trace_shifts=allow_individual_trace_shifts, xcorr_func='fftw', cores=cores) dist_mat[i] = 1 - dist_list if allow_individual_trace_shifts: n_shifts_per_stream = shift_list.shape[1] shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list else: shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list if shift_len == 0: assert np.allclose(dist_mat, dist_mat.T, atol=0.00001) # Force perfect symmetry dist_mat = (dist_mat + dist_mat.T) / 2 shift_mat = shift_mat[:, :, 0:n_shifts_per_stream].squeeze() else: # get the shortest distance for each correlation pair dist_mat_shortest = np.minimum(dist_mat, dist_mat.T) # Indicator says which matrix has shortest dist: value 0: mat2; 1: mat1 mat_indicator = dist_mat_shortest == dist_mat mat_indicator = np.repeat(mat_indicator[:, :, np.newaxis], n_shifts_per_stream, axis=2)[:, :] # Get shift for the shortest distances shift_mat = shift_mat[:, :, 0:n_shifts_per_stream][:, :] shift_mat = shift_mat * mat_indicator +\ np.transpose(shift_mat, [1, 0, 2]) * (1 - mat_indicator) dist_mat = dist_mat_shortest np.fill_diagonal(dist_mat, 0) return dist_mat, shift_mat.squeeze() " 819,"def test_constants(): assert Dagger(0) == 0 assert Dagger(1) == 1 assert Dagger(1 + 1j) == 1 - 1j ","def test_constants(): assert Dagger(0) == 0 assert Dagger(1) == 1 assert Dagger(1 + I) == 1 - I " 42332,"def run_command(executable_cmd, cmdline_args=None, **kwargs): ''' Run an (Ansible) commands in the foreground and return a Runner object when complete. :param executable_cmd: The command to be executed. :param cmdline_args: A list of arguments to be passed to the executable command. :param input_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the input file descrption to interact with the sub-process running the command. :param output_fd: The output file descriptor to stream the output of command execution. :param error_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the error file descrption to read the error received while executing the command. :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. If the value of ``input_fd`` parameter is set or the executable command is one of ``ansible-config``, ``ansible-doc`` or ``ansible-galaxy`` the default value is set to ``subprocess`` else in other cases it is set to ``pexpect``. :param cwd: The current working directory from which the command in executable_cmd shoul be be executed. :param envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. 
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param quiet: Disable all output :param json_mode: Store event data in place of stdout on the console and in the stdout file :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param process_isolation: Enable process isolation, using a container engine (e.g. podman). :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param container_options: List of container options to pass to execution engine. :param container_workdir: The working directory within the container. :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :type executable_cmd: str :type cmdline_args: list :type input_fd: file descriptor :type output_fd: file descriptor :type error_fd: file descriptor :type runner_mode: str :type cwd: str :type envvars: dict :type passwords: dict :type settings: dict :type private_data_dir: str :type project_dir: str :type artifact_dir: str :type fact_cache_type: str :type fact_cache: str :type process_isolation: bool :type process_isolation_executable: str :type container_image: str :type container_volume_mounts: list :type container_options: list :type container_workdir: str :type ident: str :type rotate_artifacts: int :type ssh_key: str :type quiet: bool :type json_mode: bool :type event_handler: function :type cancel_callback: function :type finished_callback: function :type status_handler: function :type artifacts_handler: function :returns: Retunes a tuple of return code, response and error string. In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as ``pexpect`` uses same output descriptor for stdout and stderr. 
''' r = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs) r.run() response = r.stdout.read() error = r.stderr.read() return r.rc, response, error ","def run_command(executable_cmd, cmdline_args=None, **kwargs): ''' Run an (Ansible) commands in the foreground and return a Runner object when complete. :param executable_cmd: The command to be executed. :param cmdline_args: A list of arguments to be passed to the executable command. :param input_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the input file descrption to interact with the sub-process running the command. :param output_fd: The output file descriptor to stream the output of command execution. :param error_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the error file descrption to read the error received while executing the command. :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. If the value of ``input_fd`` parameter is set or the executable command is one of ``ansible-config``, ``ansible-doc`` or ``ansible-galaxy`` the default value is set to ``subprocess`` else in other cases it is set to ``pexpect``. :param cwd: The current working directory from which the command in executable_cmd shoul be be executed. :param envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param quiet: Disable all output :param json_mode: Store event data in place of stdout on the console and in the stdout file :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param process_isolation: Enable process isolation, using a container engine (e.g. podman). :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param container_options: List of container options to pass to execution engine. :param container_workdir: The working directory within the container. :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param ident: The run identifier for this invocation of Runner. 
Will be used to create and name the artifact directory holding the results of the invocation. :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :type executable_cmd: str :type cmdline_args: list :type input_fd: file descriptor :type output_fd: file descriptor :type error_fd: file descriptor :type runner_mode: str :type cwd: str :type envvars: dict :type passwords: dict :type settings: dict :type private_data_dir: str :type project_dir: str :type artifact_dir: str :type fact_cache_type: str :type fact_cache: str :type process_isolation: bool :type process_isolation_executable: str :type container_image: str :type container_volume_mounts: list :type container_options: list :type container_workdir: str :type ident: str :type rotate_artifacts: int :type ssh_key: str :type quiet: bool :type json_mode: bool :type event_handler: function :type cancel_callback: function :type finished_callback: function :type status_handler: function :type artifacts_handler: function :returns: Returns a tuple of return code, response and error string. In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as ``pexpect`` uses same output descriptor for stdout and stderr. ''' r = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs) r.run() response = r.stdout.read() error = r.stderr.read() return r.rc, response, error " 5690,"def binomtest(k, n, p=0.5, alternative='two-sided'): """""" Perform a test that the probability of success is p. The binomial test [1]_ is a test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Details of the test can be found in many texts on statistics, such as section 24.5 of [2]_. Parameters ---------- k : int The number of successes. n : int The number of trials. p : float, optional The hypothesized probability of success, i.e. the expected proportion of successes. The value must be in the interval ``0 <= p <= 1``. The default value is ``p = 0.5``. alternative : {'two-sided', 'greater', 'less'}, optional Indicates the alternative hypothesis. The default value is 'two-sided'. Returns ------- result : `BinomTestResult` instance The return value is an object with the following attributes: k : int The number of successes (copied from `binomtest` input). n : int The number of trials (copied from `binomtest` input). alternative : str Indicates the alternative hypothesis specified in the input to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, or ``'less'``. pvalue : float The p-value of the hypothesis test. proportion_estimate : float The estimate of the proportion of successes. The object has the following methods: proportion_ci(confidence_level=0.95, method='exact') : Compute the confidence interval for ``proportion_estimate``. Notes ----- .. versionadded:: 1.7.0 References ---------- .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test .. [2] Jerrold H. 
Zar, Biostatistical Analysis (fifth edition), Prentice Hall, Upper Saddle River, New Jersey USA (2010) Examples -------- >>> from scipy.stats import binomtest A car manufacturer claims that no more than 10% of their cars are unsafe. 15 cars are inspected for safety, 3 were found to be unsafe. Test the manufacturer's claim: >>> result = binomtest(3, n=15, p=0.1, alternative='greater') >>> result.pvalue 0.18406106910639114 The null hypothesis cannot be rejected at the 5% level of significance because the returned p-value is greater than the critical value of 5%. The estimated proportion is simply ``3/15``: >>> result.proportion_estimate 0.2 We can use the `proportion_ci()` method of the result to compute the confidence interval of the estimate: >>> result.proportion_ci(confidence_level=0.95) ConfidenceInterval(low=0.056846867590246826, high=1.0) """""" k = _validate_int(k, 'k', minimum=0) n = _validate_int(n, 'n', minimum=1) if k > n: raise ValueError('k must not be greater than n.') if not (0 <= p <= 1): raise ValueError(""p must be in range [0,1]"") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError(""alternative not recognized; \n"" ""must be 'two-sided', 'less' or 'greater'"") a_fn = lambda x1:binom.pmf(x1,n,p) if alternative == 'less': pval = binom.cdf(k, n, p) elif alternative == 'greater': pval = binom.sf(k-1, n, p) else: # alternative is 'two-sided' d = binom.pmf(k, n, p) rerr = 1 + 1e-7 if k == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. elif k < p * n: y = n-binary_search_for_binom_tst(a_fn,d*rerr,np.ceil(p * n),n)+1 pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p) else: y = binary_search_for_binom_tst(a_fn,d*rerr,0,np.floor(p*n) + 1,True)+1 pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p) pval = min(1.0, pval) result = BinomTestResult(k=k, n=n, alternative=alternative, proportion_estimate=k/n, pvalue=pval) return result ","def binomtest(k, n, p=0.5, alternative='two-sided'): """""" Perform a test that the probability of success is p. The binomial test [1]_ is a test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Details of the test can be found in many texts on statistics, such as section 24.5 of [2]_. Parameters ---------- k : int The number of successes. n : int The number of trials. p : float, optional The hypothesized probability of success, i.e. the expected proportion of successes. The value must be in the interval ``0 <= p <= 1``. The default value is ``p = 0.5``. alternative : {'two-sided', 'greater', 'less'}, optional Indicates the alternative hypothesis. The default value is 'two-sided'. Returns ------- result : `BinomTestResult` instance The return value is an object with the following attributes: k : int The number of successes (copied from `binomtest` input). n : int The number of trials (copied from `binomtest` input). alternative : str Indicates the alternative hypothesis specified in the input to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, or ``'less'``. pvalue : float The p-value of the hypothesis test. proportion_estimate : float The estimate of the proportion of successes. The object has the following methods: proportion_ci(confidence_level=0.95, method='exact') : Compute the confidence interval for ``proportion_estimate``. Notes ----- .. versionadded:: 1.7.0 References ---------- .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test .. [2] Jerrold H. 
Zar, Biostatistical Analysis (fifth edition), Prentice Hall, Upper Saddle River, New Jersey USA (2010) Examples -------- >>> from scipy.stats import binomtest A car manufacturer claims that no more than 10% of their cars are unsafe. 15 cars are inspected for safety, 3 were found to be unsafe. Test the manufacturer's claim: >>> result = binomtest(3, n=15, p=0.1, alternative='greater') >>> result.pvalue 0.18406106910639114 The null hypothesis cannot be rejected at the 5% level of significance because the returned p-value is greater than the critical value of 5%. The estimated proportion is simply ``3/15``: >>> result.proportion_estimate 0.2 We can use the `proportion_ci()` method of the result to compute the confidence interval of the estimate: >>> result.proportion_ci(confidence_level=0.95) ConfidenceInterval(low=0.056846867590246826, high=1.0) """""" k = _validate_int(k, 'k', minimum=0) n = _validate_int(n, 'n', minimum=1) if k > n: raise ValueError('k must not be greater than n.') if not (0 <= p <= 1): raise ValueError(""p must be in range [0,1]"") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError(""alternative not recognized; \n"" ""must be 'two-sided', 'less' or 'greater'"") a_fn = lambda x1:binom.pmf(x1,n,p) if alternative == 'less': pval = binom.cdf(k, n, p) elif alternative == 'greater': pval = binom.sf(k-1, n, p) else: # alternative is 'two-sided' d = binom.pmf(k, n, p) rerr = 1 + 1e-7 if k == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. elif k < p * n: y = n-binary_search_for_binom_tst(a_fn, d*rerr, np.ceil(p * n), n)+1 pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p) else: y = binary_search_for_binom_tst(a_fn,d*rerr,0,np.floor(p*n) + 1,True)+1 pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p) pval = min(1.0, pval) result = BinomTestResult(k=k, n=n, alternative=alternative, proportion_estimate=k/n, pvalue=pval) return result " 20548,"def compute_mtr(nii_mt1, nii_mt0, threshold_mtr=100): """""" Compute Magnetization Transfer Ratio in percentage. :param nii_mt1: Image object :param nii_mt0: Image object :param threshold_mtr: float: value above which number will be clipped :return: nii_mtr """""" # Convert input before compute MTR nii_mt1.change_type('float32') nii_mt0.change_type('float32') # Initialize Image object nii_mtr = nii_mt1.copy() # Compute MTR nii_mtr.data = divide_after_removing_zero(100 * (nii_mt0.data - nii_mt1.data), nii_mt0.data, threshold_mtr) return nii_mtr ","def compute_mtr(nii_mt1, nii_mt0, threshold_mtr=100): """""" Compute Magnetization Transfer Ratio in percentage. :param nii_mt1: Image object :param nii_mt0: Image object :param threshold_mtr: float: value above which number will be clipped :return: nii_mtr """""" # Convert input to avoid numerical errors from int16 data (https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues/3636) nii_mt1.change_type('float32') nii_mt0.change_type('float32') # Initialize Image object nii_mtr = nii_mt1.copy() # Compute MTR nii_mtr.data = divide_after_removing_zero(100 * (nii_mt0.data - nii_mt1.data), nii_mt0.data, threshold_mtr) return nii_mtr " 27953,"def perform_build_command(logfile, command, context, keep_link, silent=False): """""" Build the project and create a log file. 
"""""" LOG.info(""Starting build ..."") try: original_env_file = os.environ['CODECHECKER_ORIGINAL_BUILD_ENV'] LOG.debug_analyzer('Loading original build env from: %s', original_env_file) with open(original_env_file, 'rb') as env_file: original_env = pickle.load(env_file) # The env is loaded we do not need it anymore. # Remove temporary directory. try: tmp_dir, _ = os.path.split(original_env_file) shutil.rmtree(tmp_dir) except OSError as ex: if ex.errno != errno.ENOENT: LOG.warning('Failed to remove temporary directory: %s ' 'Manual cleanup is required.', tmp_dir) except Exception as ex: LOG.warning(str(ex)) LOG.warning('Failed to get saved original_env' 'using a current copy for logging.') original_env = os.environ.copy() # Run user's commands with intercept. if host_check.check_intercept(original_env): LOG.debug_analyzer(""with intercept ..."") final_command = command command = ' '.join([""intercept-build"", ""--cdb"", logfile, ""sh -c \"""" + final_command + ""\""""]) log_env = original_env LOG.debug_analyzer(command) # Run user's commands in shell. else: # TODO: better platform detection. if platform.system() == 'Linux': LOG.debug_analyzer(""with ld logger ..."") open(logfile, 'a').close() # Same as linux's touch. log_env = env.get_log_env(logfile, context, original_env) if 'CC_LOGGER_GCC_LIKE' not in log_env: log_env['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++' if keep_link or ('CC_LOGGER_KEEP_LINK' in log_env and log_env['CC_LOGGER_KEEP_LINK'] == 'true'): log_env['CC_LOGGER_KEEP_LINK'] = 'true' else: LOG.error(""Intercept-build is required"" "" to run CodeChecker in OS X."") sys.exit(1) LOG.debug_analyzer(log_env) try: ret_code = execute_buildcmd(command, silent, log_env) if ret_code == 0: LOG.info(""Build finished successfully."") LOG.debug_analyzer(""The logfile is: %s"", logfile) else: LOG.info(""Build failed."") sys.exit(ret_code) except Exception as ex: LOG.error(""Calling original build command failed."") LOG.error(str(ex)) sys.exit(1) finally: # Removing flock lock file. logfile_lock = logfile + '.lock' if os.path.exists(logfile_lock): os.remove(logfile_lock) ","def perform_build_command(logfile, command, context, keep_link, silent=False): """""" Build the project and create a log file. """""" LOG.info(""Starting build ..."") try: original_env_file = os.environ['CODECHECKER_ORIGINAL_BUILD_ENV'] LOG.debug_analyzer('Loading original build env from: %s', original_env_file) with open(original_env_file, 'rb') as env_file: original_env = pickle.load(env_file) # The env is loaded we do not need it anymore. # Remove temporary directory. try: tmp_dir, _ = os.path.split(original_env_file) shutil.rmtree(tmp_dir) except OSError as ex: if ex.errno != errno.ENOENT: LOG.warning('Failed to remove temporary directory: %s. ' 'Manual cleanup is required.', tmp_dir) except Exception as ex: LOG.warning(str(ex)) LOG.warning('Failed to get saved original_env' 'using a current copy for logging.') original_env = os.environ.copy() # Run user's commands with intercept. if host_check.check_intercept(original_env): LOG.debug_analyzer(""with intercept ..."") final_command = command command = ' '.join([""intercept-build"", ""--cdb"", logfile, ""sh -c \"""" + final_command + ""\""""]) log_env = original_env LOG.debug_analyzer(command) # Run user's commands in shell. else: # TODO: better platform detection. if platform.system() == 'Linux': LOG.debug_analyzer(""with ld logger ..."") open(logfile, 'a').close() # Same as linux's touch. 
log_env = env.get_log_env(logfile, context, original_env) if 'CC_LOGGER_GCC_LIKE' not in log_env: log_env['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++' if keep_link or ('CC_LOGGER_KEEP_LINK' in log_env and log_env['CC_LOGGER_KEEP_LINK'] == 'true'): log_env['CC_LOGGER_KEEP_LINK'] = 'true' else: LOG.error(""Intercept-build is required"" "" to run CodeChecker in OS X."") sys.exit(1) LOG.debug_analyzer(log_env) try: ret_code = execute_buildcmd(command, silent, log_env) if ret_code == 0: LOG.info(""Build finished successfully."") LOG.debug_analyzer(""The logfile is: %s"", logfile) else: LOG.info(""Build failed."") sys.exit(ret_code) except Exception as ex: LOG.error(""Calling original build command failed."") LOG.error(str(ex)) sys.exit(1) finally: # Removing flock lock file. logfile_lock = logfile + '.lock' if os.path.exists(logfile_lock): os.remove(logfile_lock) " 57748,"def get_test_list_and_content_packs_to_install(files_string, branch_name, minimum_server_version='0', conf=deepcopy(CONF), id_set=deepcopy(ID_SET)): """"""Create a test list that should run"""""" (modified_files_with_relevant_tests, modified_tests_list, changed_common, is_conf_json, sample_tests, modified_metadata_list, is_reputations_json, is_indicator_json) = get_modified_files_for_testing(files_string) all_modified_files_paths = set( modified_files_with_relevant_tests + modified_tests_list + changed_common + sample_tests ) from_version, to_version = get_from_version_and_to_version_bounderies(all_modified_files_paths, id_set, modified_metadata_list=modified_metadata_list, ) # Check if only README file in file string, if so, no need to create the servers. documentation_changes_only = is_documentation_changes_only(files_string) create_filter_envs_file(from_version, to_version, documentation_changes_only=documentation_changes_only) tests = set([]) packs_to_install = set([]) # Get packs and tests for changed scripts integration and playbooks if modified_files_with_relevant_tests: tests, packs_to_install = find_tests_and_content_packs_for_modified_files(modified_files_with_relevant_tests, conf, id_set) for pack in modified_metadata_list: pack_tests = get_tests_for_pack(tools.pack_name_to_path(pack)) packs_to_install.add(pack) tests = tests.union(pack_tests) # Adding a unique test for a json file. 
if is_reputations_json: tests.add('FormattingPerformance - Test') tests.add('reputations.json Test') tests.add('Indicators reputation-.json Test') if is_indicator_json: tests.add('Test IP Indicator Fields') for file_path in modified_tests_list: test = tools.collect_ids(file_path) if test not in tests: tests.add(test) if is_conf_json: tests = tests.union(get_test_from_conf(branch_name, conf)) if changed_common: tests.add('TestCommonPython') # get all modified packs - not just tests related # TODO: need to move the logic of collecting packs of all items to be inside get_modified_files_for_testing modified_packs = get_modified_packs(files_string) if modified_packs: packs_to_install = packs_to_install.union(modified_packs) # Get packs of integrations corresponding to each test, as listed in conf.json packs_of_tested_integrations = conf.get_packs_of_tested_integrations(tests, id_set) packs_to_install = packs_to_install.union(packs_of_tested_integrations) # Get packs that contains each of the collected tests packs_of_collected_tests = get_content_pack_name_of_test(tests, id_set) packs_to_install = packs_to_install.union(packs_of_collected_tests) # All filtering out of packs should be done here packs_to_install = {pack_to_install for pack_to_install in packs_to_install if pack_to_install not in IGNORED_FILES} # All filtering out of tests should be done here tests = filter_tests(tests, id_set) if not tests: logging.info(""No tests found running sanity check only"") sanity_tests = { ""Sanity Test - Playbook with no integration"", ""Sanity Test - Playbook with integration"", ""Sanity Test - Playbook with mocked integration"", ""Sanity Test - Playbook with Unmockable Integration"" } logging.debug(f""Adding sanity tests: {sanity_tests}"") tests.update(sanity_tests) logging.debug(""Adding HelloWorld to tests as most of the sanity tests requires it."") logging.debug( ""Adding Gmail to packs to install as 'Sanity Test - Playbook with Unmockable Integration' uses it"" ) packs_to_install.update([""HelloWorld"", ""Gmail""]) # We add Base andDeveloperTools packs for every build packs_to_install.update([""DeveloperTools"", ""Base""]) return tests, packs_to_install ","def get_test_list_and_content_packs_to_install(files_string, branch_name, minimum_server_version='0', conf=deepcopy(CONF), id_set=deepcopy(ID_SET)): """"""Create a test list that should run"""""" (modified_files_with_relevant_tests, modified_tests_list, changed_common, is_conf_json, sample_tests, modified_metadata_list, is_reputations_json, is_indicator_json) = get_modified_files_for_testing(files_string) all_modified_files_paths = set( modified_files_with_relevant_tests + modified_tests_list + changed_common + sample_tests ) from_version, to_version = get_from_version_and_to_version_bounderies(all_modified_files_paths, id_set, modified_metadata_list=modified_metadata_list, ) # Check if only README file in file string, if so, no need to create the servers. documentation_changes_only = is_documentation_changes_only(files_string) create_filter_envs_file(from_version, to_version, documentation_changes_only=documentation_changes_only) tests = set([]) packs_to_install = set([]) # Get packs and tests for changed scripts integration and playbooks if modified_files_with_relevant_tests: tests, packs_to_install = find_tests_and_content_packs_for_modified_files(modified_files_with_relevant_tests, conf, id_set) # Adding a unique test for a json file. 
if is_reputations_json: tests.add('FormattingPerformance - Test') tests.add('reputations.json Test') tests.add('Indicators reputation-.json Test') if is_indicator_json: tests.add('Test IP Indicator Fields') for file_path in modified_tests_list: test = tools.collect_ids(file_path) if test not in tests: tests.add(test) if is_conf_json: tests = tests.union(get_test_from_conf(branch_name, conf)) if changed_common: tests.add('TestCommonPython') # get all modified packs - not just tests related # TODO: need to move the logic of collecting packs of all items to be inside get_modified_files_for_testing modified_packs = get_modified_packs(files_string) if modified_packs: packs_to_install = packs_to_install.union(modified_packs) # Get packs of integrations corresponding to each test, as listed in conf.json packs_of_tested_integrations = conf.get_packs_of_tested_integrations(tests, id_set) packs_to_install = packs_to_install.union(packs_of_tested_integrations) # Get packs that contains each of the collected tests packs_of_collected_tests = get_content_pack_name_of_test(tests, id_set) packs_to_install = packs_to_install.union(packs_of_collected_tests) # All filtering out of packs should be done here packs_to_install = {pack_to_install for pack_to_install in packs_to_install if pack_to_install not in IGNORED_FILES} # All filtering out of tests should be done here tests = filter_tests(tests, id_set) if not tests: logging.info(""No tests found running sanity check only"") sanity_tests = { ""Sanity Test - Playbook with no integration"", ""Sanity Test - Playbook with integration"", ""Sanity Test - Playbook with mocked integration"", ""Sanity Test - Playbook with Unmockable Integration"" } logging.debug(f""Adding sanity tests: {sanity_tests}"") tests.update(sanity_tests) logging.debug(""Adding HelloWorld to tests as most of the sanity tests requires it."") logging.debug( ""Adding Gmail to packs to install as 'Sanity Test - Playbook with Unmockable Integration' uses it"" ) packs_to_install.update([""HelloWorld"", ""Gmail""]) # We add Base andDeveloperTools packs for every build packs_to_install.update([""DeveloperTools"", ""Base""]) return tests, packs_to_install " 43062,"def chop_in_blocks_multi(m, idtodelete): """""" Splits an array of (symmetric) matrices each into 3 blocks, A, B, C Blocks A and C are diagonal blocks and B is the offdiagonal block idtodelete specifies which indices go into C. """""" A = np.copy(m) A = np.delete(A, idtodelete, axis=1) A = np.delete(A, idtodelete, axis=2) B = np.delete(m[:, :, idtodelete], idtodelete, axis=1) C = m[:, idtodelete, :][:, :, idtodelete] return (A, B, C) ","def chop_in_blocks_multi(m, idtodelete): """""" Splits an array of (symmetric) matrices each into 3 blocks, A, B, C Blocks A and C are diagonal blocks and B is the offdiagonal block idtodelete specifies which indices go into C. 
"""""" A = np.delete(m, idtodelete, axis=1) A = np.delete(A, idtodelete, axis=2) B = np.delete(m[:, :, idtodelete], idtodelete, axis=1) C = m[:, idtodelete, :][:, :, idtodelete] return (A, B, C) " 45771,"def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( '--amp', action='store_true', default=False, help='enable automatic-mixed-precision training') parser.add_argument( '--auto-scale-lr', action='store_true', help='enable automatically scaling LR.') parser.add_argument( '--auto-resume', action='store_true', help='resume from the latest checkpoint automatically') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key=""[a,b]"" or key=a,b ' 'It also allows nested list/tuple values, e.g. key=""[(a,b),(c,d)]"" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args ","def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( '--amp', action='store_true', default=False, help='enable automatic-mixed-precision training') parser.add_argument( '--auto-scale-lr', action='store_true', help='enable automatically scaling LR.') parser.add_argument( '--auto-resume', action='store_true', help='resume from the latest checkpoint in the work_dir automatically') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key=""[a,b]"" or key=a,b ' 'It also allows nested list/tuple values, e.g. 
key=""[(a,b),(c,d)]"" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args " 21149,"def test_attr_pipeline_checks(en_vocab): doc1 = Doc(en_vocab, words=[""Test""]) doc1[0].dep_ = ""ROOT"" doc2 = Doc(en_vocab, words=[""Test""]) doc2[0].tag_ = ""TAG"" doc2[0].pos_ = ""X"" doc2[0].morph_ = ""Feat=Val"" doc2[0].lemma_ = ""LEMMA"" doc3 = Doc(en_vocab, words=[""Test""]) # DEP requires DEP matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""DEP"": ""a""}]]) matcher(doc1) with pytest.raises(ValueError): matcher(doc2) with pytest.raises(ValueError): matcher(doc3) # errors can be suppressed if desired matcher(doc2, suppress_errors=True) matcher(doc3, suppress_errors=True) # TAG, POS, LEMMA require those values for attr in (""TAG"", ""POS"", ""LEMMA""): matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{attr: ""a""}]]) matcher(doc2) with pytest.raises(ValueError): matcher(doc1) with pytest.raises(ValueError): matcher(doc3) # TEXT/ORTH only require tokens matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""ORTH"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""TEXT"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) ","def test_attr_pipeline_checks(en_vocab): doc1 = Doc(en_vocab, words=[""Test""]) doc1[0].dep_ = ""ROOT"" doc2 = Doc(en_vocab, words=[""Test""]) doc2[0].tag_ = ""TAG"" doc2[0].pos_ = ""X"" doc2[0].morph_ = ""Feat=Val"" doc2[0].lemma_ = ""LEMMA"" doc3 = Doc(en_vocab, words=[""Test""]) # DEP requires DEP matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""DEP"": ""a""}]]) matcher(doc1) with pytest.raises(ValueError): matcher(doc2) with pytest.raises(ValueError): matcher(doc3) # errors can be suppressed if desired matcher(doc2, allow_missing=True) matcher(doc3, suppress_errors=True) # TAG, POS, LEMMA require those values for attr in (""TAG"", ""POS"", ""LEMMA""): matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{attr: ""a""}]]) matcher(doc2) with pytest.raises(ValueError): matcher(doc1) with pytest.raises(ValueError): matcher(doc3) # TEXT/ORTH only require tokens matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""ORTH"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""TEXT"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) " 26821,"def secondary_training_status_message(job_description: Optional[dict], prev_description: Optional[dict]): """""" Returns a string contains start time and the secondary training job status message. :param job_description: Returned response from DescribeTrainingJob call :type job_description: dict :param prev_description: Previous job description from DescribeTrainingJob call :type prev_description: dict :return: Job status string to be printed. 
"""""" if job_description is None: return '' secondary_status_transitions = job_description.get('SecondaryStatusTransitions') if secondary_status_transitions is None or len(secondary_status_transitions) == 0: return '' prev_description_secondary_transitions = ( prev_description.get('SecondaryStatusTransitions') if prev_description is not None else None ) prev_transitions_num = ( len(prev_description_secondary_transitions) if prev_description_secondary_transitions is not None else 0 ) current_transitions = job_description['SecondaryStatusTransitions'] transitions_to_print = ( current_transitions[-1:] if len(current_transitions) == prev_transitions_num else current_transitions[prev_transitions_num - len(current_transitions) :] ) status_strs = [] for transition in transitions_to_print: message = transition['StatusMessage'] time_str = timezone.convert_to_utc(job_description['LastModifiedTime']).strftime('%Y-%m-%d %H:%M:%S') status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message)) return '\n'.join(status_strs) ","def secondary_training_status_message(job_description: Optional[dict], prev_description: Optional[dict]): """""" Returns a string contains start time and the secondary training job status message. :param job_description: Returned response from DescribeTrainingJob call :type job_description: dict :param prev_description: Previous job description from DescribeTrainingJob call :type prev_description: dict :return: Job status string to be printed. """""" if job_description is None: return '' if len(job_description.get('SecondaryStatusTransitions', [])) == 0: return '' prev_description_secondary_transitions = ( prev_description.get('SecondaryStatusTransitions') if prev_description is not None else None ) prev_transitions_num = ( len(prev_description_secondary_transitions) if prev_description_secondary_transitions is not None else 0 ) current_transitions = job_description['SecondaryStatusTransitions'] transitions_to_print = ( current_transitions[-1:] if len(current_transitions) == prev_transitions_num else current_transitions[prev_transitions_num - len(current_transitions) :] ) status_strs = [] for transition in transitions_to_print: message = transition['StatusMessage'] time_str = timezone.convert_to_utc(job_description['LastModifiedTime']).strftime('%Y-%m-%d %H:%M:%S') status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message)) return '\n'.join(status_strs) " 22600,"def TensorFlowWrapper( tensorflow_model: Any, build_model: bool = True, convert_inputs: Optional[Callable] = None, convert_outputs: Optional[Callable] = None, optimizer: Optional[Any] = None, model_class: Type[Model] = Model, input_shape: Optional[Tuple[int, ...]] = None, model_name: str = ""tensorflow"", ) -> Model[InT, OutT]: """"""Wrap a TensorFlow model, so that it has the same API as Thinc models. To optimize the model, you'll need to create a TensorFlow optimizer and call optimizer.apply_gradients after each batch. """""" assert_tensorflow_installed() if not isinstance(tensorflow_model, tf.keras.models.Model): err = f""Expected tf.keras.models.Model, got: {type(tensorflow_model)}"" raise ValueError(err) # Determine if the model is Sequential/Functional is_subclass = False try: tensorflow_model.to_json() except NotImplementedError: is_subclass = True if is_subclass: for prop_name in [""catalogue_name"", ""eg_x"", ""eg_y"", ""eg_shape""]: if not hasattr(tensorflow_model, prop_name): raise ValueError( ""Keras subclassed models are not whole-model serializable by "" ""Tensorflow. 
To work around this, you must decorate your keras "" ""model subclasses with the 'keras_subclass' decorator. The decorator "" ""requires a single X/Y input of fake-data that can be used to initialize "" ""your subclass model properly when loading the saved version."" ) # Attach the input shape if it's not provided if input_shape is None: input_shape = tensorflow_model.eg_shape # Building a TF model checks for errors like not specifying an input_shape # which can cause other errors in methods like from_disk and from_bytes. if build_model: tensorflow_model.build(input_shape=input_shape) if convert_inputs is None: convert_inputs = _convert_inputs if convert_outputs is None: convert_outputs = _convert_outputs return model_class( model_name, forward, shims=[TensorFlowShim(tensorflow_model, optimizer=optimizer)], attrs={""convert_inputs"": convert_inputs, ""convert_outputs"": convert_outputs}, ) ","def TensorFlowWrapper( tensorflow_model: Any, build_model: bool = True, convert_inputs: Optional[Callable] = None, convert_outputs: Optional[Callable] = None, optimizer: Optional[Any] = None, model_class: Type[Model] = Model, input_shape: Optional[Tuple[int, ...]] = None, model_name: str = ""tensorflow"", ) -> Model[InT, OutT]: """"""Wrap a TensorFlow model, so that it has the same API as Thinc models. To optimize the model, you'll need to create a TensorFlow optimizer and call optimizer.apply_gradients after each batch. """""" assert_tensorflow_installed() if not isinstance(tensorflow_model, tf.keras.models.Model): err = f""Expected tf.keras.models.Model, got: {type(tensorflow_model)}"" raise ValueError(err) # Determine if the model is Sequential/Functional is_subclass = False try: tensorflow_model.to_json() except NotImplementedError: is_subclass = True if is_subclass: for prop_name in [""catalogue_name"", ""eg_x"", ""eg_y"", ""eg_shape""]: if not hasattr(tensorflow_model, prop_name): raise ValueError( ""Keras subclassed models are not whole-model serializable by "" ""TensorFlow. To work around this, you must decorate your keras "" ""model subclasses with the 'keras_subclass' decorator. The decorator "" ""requires a single X/Y input of fake-data that can be used to initialize "" ""your subclass model properly when loading the saved version."" ) # Attach the input shape if it's not provided if input_shape is None: input_shape = tensorflow_model.eg_shape # Building a TF model checks for errors like not specifying an input_shape # which can cause other errors in methods like from_disk and from_bytes. if build_model: tensorflow_model.build(input_shape=input_shape) if convert_inputs is None: convert_inputs = _convert_inputs if convert_outputs is None: convert_outputs = _convert_outputs return model_class( model_name, forward, shims=[TensorFlowShim(tensorflow_model, optimizer=optimizer)], attrs={""convert_inputs"": convert_inputs, ""convert_outputs"": convert_outputs}, ) " 30826,"def get_security_profiles_command(): """""" Get information about profiles. 
"""""" security_profile = demisto.args().get('security_profile') if security_profile: xpath = f'{XPATH_RULEBASE}profiles/{security_profile}' else: xpath = f'{XPATH_RULEBASE}profiles' result = get_security_profile(xpath) if security_profile: security_profiles = result.get('response', {}).get('result', {}) else: security_profiles = result.get('response', {}).get('result', {}).get('profiles', {}) if '@dirtyId' in security_profiles: LOG(f'Found uncommitted item:\n{security_profiles}') raise Exception('Please commit the instance prior to getting the security profiles.') human_readable = '' content: List[Dict[str, Any]] = [] context = {} if 'spyware' in security_profiles: profiles = security_profiles.get('spyware').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': spyware_rules }) else: rules = profiles.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': spyware_rules } human_readable = tableToMarkdown('Anti Spyware Profiles', content) context.update({""Panorama.Spyware(val.Name == obj.Name)"": content}) if 'virus' in security_profiles: profiles = security_profiles.get('virus').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Decoder': antivirus_rules }) else: rules = profiles.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': antivirus_rules } human_readable += tableToMarkdown('Antivirus Profiles', content) context.update({""Panorama.Antivirus(val.Name == obj.Name)"": content}) if 'file-blocking' in security_profiles: profiles = security_profiles.get('file-blocking').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': file_blocking_rules }) else: rules = profiles.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': file_blocking_rules } human_readable += tableToMarkdown('File Blocking Profiles', content) context.update({""Panorama.FileBlocking(val.Name == obj.Name)"": content}) if 'vulnerability' in security_profiles: profiles = security_profiles.get('vulnerability').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': vulnerability_rules }) else: rules = profiles.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': vulnerability_rules } human_readable += tableToMarkdown('vulnerability Protection Profiles', content) context.update({""Panorama.Vulnerability(val.Name == obj.Name)"": content}) if 'data-filtering' in security_profiles: profiles = security_profiles.get('data-filtering').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content.append({ 'Name': 
profile['@name'], 'Rules': data_filtering_rules }) else: rules = profiles.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content = { 'Name': profiles['@name'], 'Rules': data_filtering_rules } human_readable += tableToMarkdown('Data Filtering Profiles', content) context.update({""Panorama.DataFiltering(val.Name == obj.Name)"": content}) if 'url-filtering' in security_profiles: profiles = security_profiles.get('url-filtering').get('entry', []) if isinstance(profiles, list): for profile in profiles: url_filtering_rules = prettify_get_url_filter(profile) content.append({ 'Name': profile['@name'], 'Rules': url_filtering_rules }) else: url_filtering_rules = prettify_get_url_filter(profiles) content = { 'Name': profiles['@name'], 'Rules': url_filtering_rules } human_readable += tableToMarkdown('URL Filtering Profiles', content) context.update({""Panorama.URLFilter(val.Name == obj.Name)"": content}) if 'wildfire-analysis' in security_profiles: profiles = security_profiles.get('wildfire-analysis').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': wildfire_rules }) else: rules = profiles.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content = { 'Name': profiles['@name'], 'Rules': wildfire_rules } human_readable += tableToMarkdown('WildFire Profiles', content) context.update({""Panorama.WildFire(val.Name == obj.Name)"": content}) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': context }) ","def get_security_profiles_command(): """""" Get information about profiles. 
"""""" security_profile = demisto.args().get('security_profile') if security_profile: xpath = f'{XPATH_RULEBASE}profiles/{security_profile}' else: xpath = f'{XPATH_RULEBASE}profiles' result = get_security_profile(xpath) if security_profile: security_profiles = result.get('response', {}).get('result', {}) else: security_profiles = result.get('response', {}).get('result', {}).get('profiles', {}) if '@dirtyId' in security_profiles: LOG(f'Found uncommitted item:\n{security_profiles}') raise Exception('Please commit the instance prior to getting the security profiles.') human_readable = '' content: List[Dict[str, Any]] = [] context = {} if 'spyware' in security_profiles: profiles = security_profiles.get('spyware', {}).get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': spyware_rules }) else: rules = profiles.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': spyware_rules } human_readable = tableToMarkdown('Anti Spyware Profiles', content) context.update({""Panorama.Spyware(val.Name == obj.Name)"": content}) if 'virus' in security_profiles: profiles = security_profiles.get('virus').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Decoder': antivirus_rules }) else: rules = profiles.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': antivirus_rules } human_readable += tableToMarkdown('Antivirus Profiles', content) context.update({""Panorama.Antivirus(val.Name == obj.Name)"": content}) if 'file-blocking' in security_profiles: profiles = security_profiles.get('file-blocking').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': file_blocking_rules }) else: rules = profiles.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': file_blocking_rules } human_readable += tableToMarkdown('File Blocking Profiles', content) context.update({""Panorama.FileBlocking(val.Name == obj.Name)"": content}) if 'vulnerability' in security_profiles: profiles = security_profiles.get('vulnerability').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': vulnerability_rules }) else: rules = profiles.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': vulnerability_rules } human_readable += tableToMarkdown('vulnerability Protection Profiles', content) context.update({""Panorama.Vulnerability(val.Name == obj.Name)"": content}) if 'data-filtering' in security_profiles: profiles = security_profiles.get('data-filtering').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content.append({ 'Name': 
profile['@name'], 'Rules': data_filtering_rules }) else: rules = profiles.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content = { 'Name': profiles['@name'], 'Rules': data_filtering_rules } human_readable += tableToMarkdown('Data Filtering Profiles', content) context.update({""Panorama.DataFiltering(val.Name == obj.Name)"": content}) if 'url-filtering' in security_profiles: profiles = security_profiles.get('url-filtering').get('entry', []) if isinstance(profiles, list): for profile in profiles: url_filtering_rules = prettify_get_url_filter(profile) content.append({ 'Name': profile['@name'], 'Rules': url_filtering_rules }) else: url_filtering_rules = prettify_get_url_filter(profiles) content = { 'Name': profiles['@name'], 'Rules': url_filtering_rules } human_readable += tableToMarkdown('URL Filtering Profiles', content) context.update({""Panorama.URLFilter(val.Name == obj.Name)"": content}) if 'wildfire-analysis' in security_profiles: profiles = security_profiles.get('wildfire-analysis').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': wildfire_rules }) else: rules = profiles.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content = { 'Name': profiles['@name'], 'Rules': wildfire_rules } human_readable += tableToMarkdown('WildFire Profiles', content) context.update({""Panorama.WildFire(val.Name == obj.Name)"": content}) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': context }) " 59056,"def _is_empty_list(node): """""" Checks if a node is a `List` node with an empty list for `elements`, including any nested `List` nodes. Arguments --------- node: vy_ast.VyperNode A Vyper node Returns ------- bool Boolean value indicating if the node is an empty `List` node. """""" if not isinstance(node, vy_ast.List): return False if any(isinstance(i, vy_ast.List) for i in node.elements): return any(_is_empty_list(i) for i in node.elements) return all(isinstance(i, vy_ast.List) and not i.elements for i in node.elements) ","def _is_empty_list(node): """""" Checks if a node is a `List` node with an empty list for `elements`, including any nested `List` nodes. Arguments --------- node: vy_ast.VyperNode A Vyper node Returns ------- bool Boolean value indicating if the node is an empty `List` node. """""" if not isinstance(node, vy_ast.List): return False if not node.elements: return True return all(_is_empty_list(t) for t in node.elements) " 32139,"def remove_duplicates_from_list_arg(args, field): """""" Removes duplicates from a dict after calling argToList. For example: args: {'ids': ""1,2,1""} , field='ids' The return output will be [""1"",""2""] :type args: dict :param args: Args to be converted (required) :type field: str :param field: Field in args to be converted into list without duplicates (required) :return: A python list of args without duplicates :rtype: ``list`` """""" convert_to_list = argToList(args.get(field)) return list(set(convert_to_list)) ","def remove_duplicates_from_list_arg(args, field): """""" Removes duplicates from a dict after calling argToList. 
For example: args: {'ids': ""1,2,1""} , field='ids' The return output will be [""1"",""2""] :type args: ``dict`` :param args: Args to be converted (required) :type field: str :param field: Field in args to be converted into list without duplicates (required) :return: A python list of args without duplicates :rtype: ``list`` """""" convert_to_list = argToList(args.get(field)) return list(set(convert_to_list)) " 5550,"def check_axis(var, *axes): """"""Check if the criteria for any of the given axes is satisfied. Parameters ---------- var : `xarray.DataArray` DataArray belonging to the coordinate to be checked axes : str Axis type(s) to check for. Currently can check for 'time', 'vertical', 'y', 'lat', 'x', and 'lon'. """""" for axis in axes: # Check for # - standard name (CF option) # - _CoordinateAxisType (from THREDDS) # - axis (CF option) # - positive (CF standard for non-pressure vertical coordinate) for criterion in ('standard_name', '_CoordinateAxisType', 'axis', 'positive'): if (var.attrs.get(criterion, 'absent') in coordinate_criteria[criterion].get(axis, set())): return True # Check for units, either by dimensionality or name try: if (axis in coordinate_criteria['units'] and ( ( coordinate_criteria['units'][axis]['match'] == 'dimensionality' and (units.get_dimensionality(var.attrs.get('units')) == units.get_dimensionality( coordinate_criteria['units'][axis]['units'])) ) or ( coordinate_criteria['units'][axis]['match'] == 'name' and var.attrs.get('units') in coordinate_criteria['units'][axis]['units'] ))): return True except UndefinedUnitError: pass # Check if name matches regular expression (non-CF failsafe) if re.match(coordinate_criteria['regular_expression'][axis], var.name.lower()): return True # If no match has been made, return False (rather than None) return False ","def check_axis(var, *axes): """"""Check if the criteria for any of the given axes are satisfied. Parameters ---------- var : `xarray.DataArray` DataArray belonging to the coordinate to be checked axes : str Axis type(s) to check for. Currently can check for 'time', 'vertical', 'y', 'lat', 'x', and 'lon'. """""" for axis in axes: # Check for # - standard name (CF option) # - _CoordinateAxisType (from THREDDS) # - axis (CF option) # - positive (CF standard for non-pressure vertical coordinate) for criterion in ('standard_name', '_CoordinateAxisType', 'axis', 'positive'): if (var.attrs.get(criterion, 'absent') in coordinate_criteria[criterion].get(axis, set())): return True # Check for units, either by dimensionality or name try: if (axis in coordinate_criteria['units'] and ( ( coordinate_criteria['units'][axis]['match'] == 'dimensionality' and (units.get_dimensionality(var.attrs.get('units')) == units.get_dimensionality( coordinate_criteria['units'][axis]['units'])) ) or ( coordinate_criteria['units'][axis]['match'] == 'name' and var.attrs.get('units') in coordinate_criteria['units'][axis]['units'] ))): return True except UndefinedUnitError: pass # Check if name matches regular expression (non-CF failsafe) if re.match(coordinate_criteria['regular_expression'][axis], var.name.lower()): return True # If no match has been made, return False (rather than None) return False " 58702,"def get_valid_config( config: Optional[Text], mandatory_keys: List[Text], default_config: Text = DEFAULT_CONFIG_PATH, ) -> Text: """"""Get a config from a config file and check if it is valid. Exit if the config isn't valid. Args: config: Path to the config file. mandatory_keys: The keys that have to be specified in the config file. 
default_config: default config to use if the file at `config` doesn't exist. Returns: The path to the config file if the config is valid. """""" config = rasa.cli.utils.get_validated_path(config, ""config"", default_config) if not os.path.exists(config): print_error( ""The config file '{}' does not exist. Use '--config' to specify a "" ""valid config file."" """".format(config) ) sys.exit(1) missing_keys = rasa.cli.utils.missing_config_keys(config, mandatory_keys) if missing_keys: print_error( ""The config file '{}' is missing mandatory parameters: "" ""'{}'. Add missing parameters to config file and try again."" """".format(config, ""', '"".join(missing_keys)) ) sys.exit(1) return config ","def get_valid_config( config: Optional[Text], mandatory_keys: List[Text], default_config: Text = DEFAULT_CONFIG_PATH, ) -> Text: """"""Get a config from a config file and check if it is valid. Exit if the config isn't valid. Args: config: Path to the config file. mandatory_keys: The keys that have to be specified in the config file. default_config: default config to use if the file at `config` doesn't exist. Returns: The path to the config file if the config is valid. """""" config = rasa.cli.utils.get_validated_path(config, ""config"", default_config) if not os.path.exists(config): print_error( ""The config file '{}' does not exist. Use '--config' to specify a "" ""valid config file."" """".format(config) ) sys.exit(1) missing_keys = rasa.cli.utils.missing_config_keys(config, mandatory_keys) if missing_keys: print_error( ""The config file '{}' is missing mandatory parameters: "" ""'{}'. Add missing parameters to config file and try again."" """".format(config, ""', '"".join(missing_keys)) ) sys.exit(1) return config " 36256,"def highly_variable_genes_seurat_v3( adata: AnnData, n_top_genes: int = 2000, batch_key: Optional[str] = None, lowess_frac: Optional[float] = 0.15, ): """"""\ Annotate highly variable genes [Stuart19]_. Expects raw count data. The major difference in this implementation is the use of lowess insted of loess. For further details of the sparse arithmetic see https://www.overleaf.com/read/ckptrbgzzzpg Parameters ---------- adata The annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond to cells and columns to genes. n_top_genes Number of highly-variable genes to keep. batch_key If specified, highly-variable genes are selected within each batch separately and merged. This simple process avoids the selection of batch-specific genes and acts as a lightweight batch correction method. lowess_frac The fraction of the data (cells) used when estimating the variance in the lowess model fit. 
"""""" import statsmodels lowess = statsmodels.nonparametric.lowess if batch_key is None: batch_info = pd.Categorical(np.zeros((adata.X.shape[0])).astype(int)) else: batch_info = adata.obs[batch_key] norm_gene_vars = [] for b in np.unique(batch_info): mean, var = materialize_as_ndarray(_get_mean_var(adata[batch_info == b].X)) not_const = var > 0 estimat_var = np.zeros((adata.X.shape[1])) y = np.log10(var[not_const]) x = np.log10(mean[not_const]) # output is sorted by x v = lowess(y, x, frac=lowess_frac) estimat_var[not_const][np.argsort(x)] = v[:, 1] # get normalized variance reg_std = np.sqrt(10 ** estimat_var) batch_counts = adata[batch_info == b].X.copy() # clip large values as in Seurat N = np.sum(batch_info == b) vmax = np.sqrt(N) clip_val = reg_std * vmax + mean # could be something faster here for g in range(batch_counts.shape[1]): batch_counts[:, g][batch_counts[:, g] > vmax] = clip_val[g] if sp_sparse.issparse(batch_counts): squared_batch_counts_sum = np.array(batch_counts.power(2).sum(axis=0)) batch_counts_sum = np.array(batch_counts.sum(axis=0)) else: squared_batch_counts_sum = np.square(batch_counts).sum(axis=0) batch_counts_sum = batch_counts.sum(axis=0) norm_gene_var = (1 / ((N - 1) * np.square(reg_std))) * ( (N * np.square(mean)) + squared_batch_counts_sum - 2 * batch_counts_sum * mean ) norm_gene_vars.append(norm_gene_var.reshape(1, -1)) norm_gene_vars = np.concatenate(norm_gene_vars, axis=0) # argsort twice gives ranks ranked_norm_gene_vars = np.argsort(np.argsort(norm_gene_vars, axis=1), axis=1) median_ranked = np.median(ranked_norm_gene_vars, axis=0) num_batches_high_var = np.sum( ranked_norm_gene_vars >= (adata.X.shape[1] - n_top_genes), axis=0 ) df = pd.DataFrame(index=np.array(adata.var_names)) df[""highly_variable_nbatches""] = num_batches_high_var df[""highly_variable_median_rank""] = median_ranked df.sort_values( [""highly_variable_nbatches"", ""highly_variable_median_rank""], ascending=False, na_position=""last"", inplace=True, ) df[""highly_variable""] = False df.loc[:n_top_genes, ""highly_variable""] = True df = df.loc[adata.var_names] adata.var[""highly_variable""] = df[""highly_variable""].values if batch_key is not None: batches = adata.obs[batch_key].cat.categories adata.var[""highly_variable_nbatches""] = df[""highly_variable_nbatches""].values adata.var[""highly_variable_intersection""] = df[ ""highly_variable_nbatches"" ] == len(batches) adata.var[""highly_variable_median_rank""] = df[""highly_variable_median_rank""].values ","def highly_variable_genes_seurat_v3( adata: AnnData, n_top_genes: int = 2000, batch_key: Optional[str] = None, lowess_frac: Optional[float] = 0.15, ): """"""\ Annotate highly variable genes [Stuart19]_. Expects raw count data. The major difference in this implementation is the use of lowess insted of loess. For further details of the sparse arithmetic see https://www.overleaf.com/read/ckptrbgzzzpg Parameters ---------- adata The annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond to cells and columns to genes. n_top_genes Number of highly-variable genes to keep. batch_key If specified, highly-variable genes are selected within each batch separately and merged. This simple process avoids the selection of batch-specific genes and acts as a lightweight batch correction method. lowess_frac The fraction of the data (cells) used when estimating the variance in the lowess model fit. 
"""""" import statsmodels lowess = statsmodels.nonparametric.lowess if batch_key is None: batch_info = pd.Categorical(np.zeros(adata.shape[0], dtype=int)) else: batch_info = adata.obs[batch_key] norm_gene_vars = [] for b in np.unique(batch_info): mean, var = materialize_as_ndarray(_get_mean_var(adata[batch_info == b].X)) not_const = var > 0 estimat_var = np.zeros((adata.X.shape[1])) y = np.log10(var[not_const]) x = np.log10(mean[not_const]) # output is sorted by x v = lowess(y, x, frac=lowess_frac) estimat_var[not_const][np.argsort(x)] = v[:, 1] # get normalized variance reg_std = np.sqrt(10 ** estimat_var) batch_counts = adata[batch_info == b].X.copy() # clip large values as in Seurat N = np.sum(batch_info == b) vmax = np.sqrt(N) clip_val = reg_std * vmax + mean # could be something faster here for g in range(batch_counts.shape[1]): batch_counts[:, g][batch_counts[:, g] > vmax] = clip_val[g] if sp_sparse.issparse(batch_counts): squared_batch_counts_sum = np.array(batch_counts.power(2).sum(axis=0)) batch_counts_sum = np.array(batch_counts.sum(axis=0)) else: squared_batch_counts_sum = np.square(batch_counts).sum(axis=0) batch_counts_sum = batch_counts.sum(axis=0) norm_gene_var = (1 / ((N - 1) * np.square(reg_std))) * ( (N * np.square(mean)) + squared_batch_counts_sum - 2 * batch_counts_sum * mean ) norm_gene_vars.append(norm_gene_var.reshape(1, -1)) norm_gene_vars = np.concatenate(norm_gene_vars, axis=0) # argsort twice gives ranks ranked_norm_gene_vars = np.argsort(np.argsort(norm_gene_vars, axis=1), axis=1) median_ranked = np.median(ranked_norm_gene_vars, axis=0) num_batches_high_var = np.sum( ranked_norm_gene_vars >= (adata.X.shape[1] - n_top_genes), axis=0 ) df = pd.DataFrame(index=np.array(adata.var_names)) df[""highly_variable_nbatches""] = num_batches_high_var df[""highly_variable_median_rank""] = median_ranked df.sort_values( [""highly_variable_nbatches"", ""highly_variable_median_rank""], ascending=False, na_position=""last"", inplace=True, ) df[""highly_variable""] = False df.loc[:n_top_genes, ""highly_variable""] = True df = df.loc[adata.var_names] adata.var[""highly_variable""] = df[""highly_variable""].values if batch_key is not None: batches = adata.obs[batch_key].cat.categories adata.var[""highly_variable_nbatches""] = df[""highly_variable_nbatches""].values adata.var[""highly_variable_intersection""] = df[ ""highly_variable_nbatches"" ] == len(batches) adata.var[""highly_variable_median_rank""] = df[""highly_variable_median_rank""].values " 14870,"def setup_platform(hass, config, add_entities, discovery_info=None) -> None: """"""Set up the Nextcloud sensor."""""" try: # Fetch Nextcloud Monitor api data ncm = NextcloudMonitor( config[CONF_URL], config[CONF_USERNAME], config[CONF_PASSWORD] ) if len(ncm.data) < 1: _LOGGER.error( ""No data returned from Nextcloud API. 
Check your user/password."" ) hass.data[DOMAIN] = ncm.data def nextcloud_update(event_time): """"""Update data from nextcloud api."""""" ncm.update() hass.data[DOMAIN] = ncm.data # Update sensors on time interval track_time_interval(hass, nextcloud_update, config[CONF_SCAN_INTERVAL]) # Create list of sensors based on available nextcloud api data sensors = [] for name, dict_path in NEXTCLOUD_ITEMS_EXPECTED.items(): try: sensors.append(NextcloudSensor(name, dict_path)) except: # noqa: E722 pylint: disable=bare-except _LOGGER.warning( ""%s sensor information was not provided by Nextcloud"", name ) continue # Setup sensors add_entities(sensors, True) except: # noqa: E722 pylint: disable=bare-except raise PlatformNotReady ","def setup_platform(hass, config, add_entities, discovery_info=None) -> None: """"""Set up the Nextcloud sensor."""""" try: # Fetch Nextcloud Monitor api data ncm = NextcloudMonitor( config[CONF_URL], config[CONF_USERNAME], config[CONF_PASSWORD] ) if not ncm.data: _LOGGER.error( ""No data returned from Nextcloud API. Check your user/password."" ) hass.data[DOMAIN] = ncm.data def nextcloud_update(event_time): """"""Update data from nextcloud api."""""" ncm.update() hass.data[DOMAIN] = ncm.data # Update sensors on time interval track_time_interval(hass, nextcloud_update, config[CONF_SCAN_INTERVAL]) # Create list of sensors based on available nextcloud api data sensors = [] for name, dict_path in NEXTCLOUD_ITEMS_EXPECTED.items(): try: sensors.append(NextcloudSensor(name, dict_path)) except: # noqa: E722 pylint: disable=bare-except _LOGGER.warning( ""%s sensor information was not provided by Nextcloud"", name ) continue # Setup sensors add_entities(sensors, True) except: # noqa: E722 pylint: disable=bare-except raise PlatformNotReady " 17807,"def makeExampleMesh(): dh = 5. hx = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)] hy = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)] hz = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)] mesh = Mesh.TensorMesh([hx, hy, hz], 'CCC') return mesh ","def make_example_mesh(dh=5, npad=5, ncore=20): dh = 5. hx = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)] hy = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)] hz = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)] mesh = Mesh.TensorMesh([hx, hy, hz], 'CCC') return mesh " 28604,"def plot_ts( idata, y, x=None, y_hat=None, y_holdout=None, y_forecasts=None, x_holdout=None, plot_dim=None, holdout_dim=None, num_samples=100, backend=None, backend_kwargs=None, y_kwargs=None, y_hat_plot_kwargs=None, y_mean_plot_kwargs=None, vline_kwargs=None, textsize=None, figsize=None, legend=True, axes=None, show=None, ): """"""Plot timeseries data. Parameters ---------- idata : InferenceData InferenceData object. y : str Variable name from ``observed_data``. Values to be plotted on y-axis before holdout. x : str, Optional Values to be plotted on x-axis before holdout. If None, coordinates of ``y`` dims is chosen. y_hat : str, optional Variable name from ``posterior_predictive``. Assumed to be of shape ``(chain, draw, *y_dims)``. y_holdout : str, optional Variable name from ``observed_data``. It represents the observed data after the holdout period. Useful while testing the model, when you want to compare observed test data with predictions/forecasts. y_forecasts : str, optional Variable name from ``posterior_predictive``. It represents forecasts (posterior predictive) values after holdout period. Useful to compare observed vs predictions/forecasts. Assumed shape ``(chain, draw, *shape)``. x_holdout : str, Defaults to coordinates of ``y``. 
Variable name from ``constant_data``. If None, coordinates of ``y_holdout`` or coordinates of ``y_forecast`` (either of the two available) is chosen. plot_dim: str, Optional Should be present in ``y.dims``. Necessary for selection of ``x`` if ``x`` is None and ``y`` is multidimensional. holdout_dim: str, Optional Should be present in ``y_holdout.dims`` or ``y_forecats.dims``. Necessary to choose ``x_holdout`` if ``x`` is None and if ``y_holdout`` or ``y_forecasts`` is multidimensional. num_samples : int, default 100 Number of posterior predictive samples drawn from ``y_hat`` and ``y_forecasts``. backend : {""matplotlib"", ""bokeh""}, default ""matplotlib"" Select plotting backend. y_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib. y_hat_plot_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib. y_mean_plot_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib. vline_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.axvline` in matplotlib. backend_kwargs : dict, optional These are kwargs specific to the backend being used. Passed to :func: `mpl:matplotlib.pyplot.subplots`. figsize : tuple, optional Figure size. If None, it will be defined automatically. textsize : float, optional Text size scaling factor for labels, titles and lines. If None, it will be autoscaled based on ``figsize``. Returns ------- axes: matplotlib axes or bokeh figures. See Also -------- plot_lm : Posterior predictive and mean plots for regression-like data. plot_ppc : Plot for posterior/prior predictive checks. Examples -------- Plot timeseries default plot .. plot:: :context: close-figs >>> import arviz as az >>> nchains, ndraws = (4, 500) >>> obs_data = { ... ""y"": 2 * np.arange(1, 9) + 3, ... ""z"": 2 * np.arange(8, 12) + 3, ... } >>> posterior_predictive = { ... ""y"": np.random.normal( ... (obs_data[""y""] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[""y""])) ... ), ... ""z"": np.random.normal( ... (obs_data[""z""] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[""z""])) ... ), ... } >>> idata = az.from_dict( ... observed_data=obs_data, ... posterior_predictive=posterior_predictive, ... coords={""obs_dim"": np.arange(1, 9), ""pred_dim"": np.arange(8, 12)}, ... dims={""y"": [""obs_dim""], ""z"": [""pred_dim""]}, ... ) >>> ax = az.plot_ts(idata=idata, y=""y"", y_holdout=""z"") Plot timeseries multidim plot .. plot:: :context: close-figs >>> ndim1, ndim2 = (5, 7) >>> data = { ... ""y"": np.random.normal(size=(ndim1, ndim2)), ... ""z"": np.random.normal(size=(ndim1, ndim2)), ... } >>> posterior_predictive = { ... ""y"": np.random.randn(nchains, ndraws, ndim1, ndim2), ... ""z"": np.random.randn(nchains, ndraws, ndim1, ndim2), ... } >>> const_data = {""x"": np.arange(1, 6), ""x_pred"": np.arange(5, 10)} >>> idata = az.from_dict( ... observed_data=data, ... posterior_predictive=posterior_predictive, ... constant_data=const_data, ... dims={ ... ""y"": [""dim1"", ""dim2""], ... ""z"": [""holdout_dim1"", ""holdout_dim2""], ... }, ... coords={ ... ""dim1"": range(ndim1), ... ""dim2"": range(ndim2), ... ""holdout_dim1"": range(ndim1 - 1, ndim1 + 4), ... ""holdout_dim2"": range(ndim2 - 1, ndim2 + 6), ... }, ... ) >>> az.plot_ts( ... idata=idata, ... y=""y"", ... plot_dim=""dim1"", ... y_holdout=""z"", ... holdout_dim=""holdout_dim1"", ... 
) """""" # Assign default values if none is provided y_hat = y if y_hat is None and isinstance(y, str) else y_hat y_forecasts = y_holdout if y_forecasts is None and isinstance(y_holdout, str) else y_forecasts # holdout_dim = plot_dim if holdout_dim is None and plot_dim is not None else holdout_dim if isinstance(y, str): y = idata.observed_data[y] if isinstance(y_holdout, str): y_holdout = idata.observed_data[y_holdout] if len(y.dims) > 1 and plot_dim is None: raise ValueError(""Argument plot_dim is needed in case of multidimensional data"") if y_holdout is not None and len(y_holdout.dims) > 1 and holdout_dim is None: raise ValueError(""Argument holdout_dim is needed in case of multidimensional data"") # Assigning values to x x_var_names = None if isinstance(x, str): x = idata.constant_data[x] elif isinstance(x, tuple): x_var_names = x x = idata.constant_data elif x is None: if plot_dim is None: x = y.coords[y.dims[0]] else: x = y.coords[plot_dim] # If posterior_predictive is present in idata and y_hat is there, get its values if isinstance(y_hat, str): if ""posterior_predictive"" not in idata.groups(): warnings.warn(""posterior_predictive not found in idata"", UserWarning) y_hat = None elif hasattr(idata.posterior_predictive, y_hat): y_hat = idata.posterior_predictive[y_hat] else: warnings.warn(""y_hat not found in posterior_predictive"", UserWarning) y_hat = None # If posterior_predictive is present in idata and y_forecasts is there, get its values x_holdout_var_names = None if isinstance(y_forecasts, str): if ""posterior_predictive"" not in idata.groups(): warnings.warn(""posterior_predictive not found in idata"", UserWarning) y_forecasts = None elif hasattr(idata.posterior_predictive, y_forecasts): y_forecasts = idata.posterior_predictive[y_forecasts] else: warnings.warn(""y_hat not found in posterior_predictive"", UserWarning) y_forecasts = None # Assign values to y_holdout if isinstance(y_holdout, str): y_holdout = idata.observed_data[y_holdout] # Assign values to x_holdout. 
if y_holdout is not None or y_forecasts is not None: if x_holdout is None: if holdout_dim is None: if y_holdout is None: x_holdout = y_forecasts.coords[y_forecasts.dims[-1]] else: x_holdout = y_holdout.coords[y_holdout.dims[-1]] else: if y_holdout is None: x_holdout = y_forecasts.coords[holdout_dim] else: x_holdout = y_holdout.coords[holdout_dim] elif isinstance(x_holdout, str): x_holdout = idata.constant_data[x_holdout] elif isinstance(x_holdout, tuple): x_holdout_var_names = x_holdout x_holdout = idata.constant_data # Choose dims to generate y plotters if plot_dim is None: skip_dims = list(y.dims) elif isinstance(plot_dim, str): skip_dims = [plot_dim] elif isinstance(plot_dim, tuple): skip_dims = list(plot_dim) # Choose dims to generate y_holdout plotters if holdout_dim is None: if y_holdout is not None: skip_holdout_dims = list(y_holdout.dims) elif y_forecasts is not None: skip_holdout_dims = list(y_forecasts.dims) elif isinstance(holdout_dim, str): skip_holdout_dims = [holdout_dim] elif isinstance(holdout_dim, tuple): skip_holdout_dims = list(holdout_dim) # Compulsory plotters y_plotters = list( xarray_var_iter( y, skip_dims=set(skip_dims), combined=True, ) ) # Compulsory plotters x_plotters = list( xarray_var_iter( x, var_names=x_var_names, skip_dims=set(x.dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs len_y = len(y_plotters) len_x = len(x_plotters) length_plotters = len_x * len_y y_plotters = np.tile(y_plotters, (len_x, 1)) x_plotters = np.tile(x_plotters, (len_y, 1)) # Generate plotters for all the available data y_mean_plotters = None y_hat_plotters = None if y_hat is not None: total_samples = y_hat.sizes[""chain""] * y_hat.sizes[""draw""] pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False) y_hat_satcked = y_hat.stack(__sample__=(""chain"", ""draw""))[..., pp_sample_ix] y_hat_plotters = list( xarray_var_iter( y_hat_satcked, skip_dims=set(skip_dims + [""__sample__""]), combined=True, ) ) y_mean = y_hat.mean((""chain"", ""draw"")) y_mean_plotters = list( xarray_var_iter( y_mean, skip_dims=set(skip_dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs y_hat_plotters = np.tile(y_hat_plotters, (len_x, 1)) y_mean_plotters = np.tile(y_mean_plotters, (len_x, 1)) y_holdout_plotters = None x_holdout_plotters = None if y_holdout is not None: y_holdout_plotters = list( xarray_var_iter( y_holdout, skip_dims=set(skip_holdout_dims), combined=True, ) ) x_holdout_plotters = list( xarray_var_iter( x_holdout, var_names=x_holdout_var_names, skip_dims=set(x_holdout.dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs y_holdout_plotters = np.tile(y_holdout_plotters, (len_x, 1)) x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1)) y_forecasts_plotters = None y_forecasts_mean_plotters = None if y_forecasts is not None: total_samples = y_forecasts.sizes[""chain""] * y_forecasts.sizes[""draw""] pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False) y_forecasts_satcked = y_forecasts.stack(__sample__=(""chain"", ""draw""))[..., pp_sample_ix] y_forecasts_plotters = list( xarray_var_iter( y_forecasts_satcked, skip_dims=set(skip_holdout_dims + [""__sample__""]), combined=True, ) ) y_forecasts_mean = y_forecasts.mean((""chain"", ""draw"")) y_forecasts_mean_plotters = list( 
xarray_var_iter( y_forecasts_mean, skip_dims=set(skip_holdout_dims), combined=True, ) ) x_holdout_plotters = list( xarray_var_iter( x_holdout, var_names=x_holdout_var_names, skip_dims=set(x_holdout.dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs y_forecasts_mean_plotters = np.tile(y_forecasts_mean_plotters, (len_x, 1)) y_forecasts_plotters = np.tile(y_forecasts_plotters, (len_x, 1)) x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1)) rows, cols = default_grid(length_plotters) tsplot_kwargs = dict( x_plotters=x_plotters, y_plotters=y_plotters, y_mean_plotters=y_mean_plotters, y_hat_plotters=y_hat_plotters, y_holdout_plotters=y_holdout_plotters, x_holdout_plotters=x_holdout_plotters, y_forecasts_plotters=y_forecasts_plotters, y_forecasts_mean_plotters=y_forecasts_mean_plotters, num_samples=num_samples, length_plotters=length_plotters, rows=rows, cols=cols, backend_kwargs=backend_kwargs, y_kwargs=y_kwargs, y_hat_plot_kwargs=y_hat_plot_kwargs, y_mean_plot_kwargs=y_mean_plot_kwargs, vline_kwargs=vline_kwargs, textsize=textsize, figsize=figsize, legend=legend, axes=axes, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() plot = get_plotting_function(""plot_ts"", ""tsplot"", backend) ax = plot(**tsplot_kwargs) return ax ","def plot_ts( idata, y, x=None, y_hat=None, y_holdout=None, y_forecasts=None, x_holdout=None, plot_dim=None, holdout_dim=None, num_samples=100, backend=None, backend_kwargs=None, y_kwargs=None, y_hat_plot_kwargs=None, y_mean_plot_kwargs=None, vline_kwargs=None, textsize=None, figsize=None, legend=True, axes=None, show=None, ): """"""Plot timeseries data. Parameters ---------- idata : InferenceData InferenceData object. y : str Variable name from ``observed_data``. Values to be plotted on y-axis before holdout. x : str, Optional Values to be plotted on x-axis before holdout. If None, coords of ``y`` dims is chosen. y_hat : str, optional Variable name from ``posterior_predictive``. Assumed to be of shape ``(chain, draw, *y_dims)``. y_holdout : str, optional Variable name from ``observed_data``. It represents the observed data after the holdout period. Useful while testing the model, when you want to compare observed test data with predictions/forecasts. y_forecasts : str, optional Variable name from ``posterior_predictive``. It represents forecasts (posterior predictive) values after holdout period. Useful to compare observed vs predictions/forecasts. Assumed shape ``(chain, draw, *shape)``. x_holdout : str, Defaults to coordinates of ``y``. Variable name from ``constant_data``. If None, coordinates of ``y_holdout`` or coordinates of ``y_forecast`` (either of the two available) is chosen. plot_dim: str, Optional Should be present in ``y.dims``. Necessary for selection of ``x`` if ``x`` is None and ``y`` is multidimensional. holdout_dim: str, Optional Should be present in ``y_holdout.dims`` or ``y_forecats.dims``. Necessary to choose ``x_holdout`` if ``x`` is None and if ``y_holdout`` or ``y_forecasts`` is multidimensional. num_samples : int, default 100 Number of posterior predictive samples drawn from ``y_hat`` and ``y_forecasts``. backend : {""matplotlib"", ""bokeh""}, default ""matplotlib"" Select plotting backend. y_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib. y_hat_plot_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib. 
y_mean_plot_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib. vline_kwargs : dict, optional Passed to :meth:`mpl:matplotlib.axes.Axes.axvline` in matplotlib. backend_kwargs : dict, optional These are kwargs specific to the backend being used. Passed to :func: `mpl:matplotlib.pyplot.subplots`. figsize : tuple, optional Figure size. If None, it will be defined automatically. textsize : float, optional Text size scaling factor for labels, titles and lines. If None, it will be autoscaled based on ``figsize``. Returns ------- axes: matplotlib axes or bokeh figures. See Also -------- plot_lm : Posterior predictive and mean plots for regression-like data. plot_ppc : Plot for posterior/prior predictive checks. Examples -------- Plot timeseries default plot .. plot:: :context: close-figs >>> import arviz as az >>> nchains, ndraws = (4, 500) >>> obs_data = { ... ""y"": 2 * np.arange(1, 9) + 3, ... ""z"": 2 * np.arange(8, 12) + 3, ... } >>> posterior_predictive = { ... ""y"": np.random.normal( ... (obs_data[""y""] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[""y""])) ... ), ... ""z"": np.random.normal( ... (obs_data[""z""] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[""z""])) ... ), ... } >>> idata = az.from_dict( ... observed_data=obs_data, ... posterior_predictive=posterior_predictive, ... coords={""obs_dim"": np.arange(1, 9), ""pred_dim"": np.arange(8, 12)}, ... dims={""y"": [""obs_dim""], ""z"": [""pred_dim""]}, ... ) >>> ax = az.plot_ts(idata=idata, y=""y"", y_holdout=""z"") Plot timeseries multidim plot .. plot:: :context: close-figs >>> ndim1, ndim2 = (5, 7) >>> data = { ... ""y"": np.random.normal(size=(ndim1, ndim2)), ... ""z"": np.random.normal(size=(ndim1, ndim2)), ... } >>> posterior_predictive = { ... ""y"": np.random.randn(nchains, ndraws, ndim1, ndim2), ... ""z"": np.random.randn(nchains, ndraws, ndim1, ndim2), ... } >>> const_data = {""x"": np.arange(1, 6), ""x_pred"": np.arange(5, 10)} >>> idata = az.from_dict( ... observed_data=data, ... posterior_predictive=posterior_predictive, ... constant_data=const_data, ... dims={ ... ""y"": [""dim1"", ""dim2""], ... ""z"": [""holdout_dim1"", ""holdout_dim2""], ... }, ... coords={ ... ""dim1"": range(ndim1), ... ""dim2"": range(ndim2), ... ""holdout_dim1"": range(ndim1 - 1, ndim1 + 4), ... ""holdout_dim2"": range(ndim2 - 1, ndim2 + 6), ... }, ... ) >>> az.plot_ts( ... idata=idata, ... y=""y"", ... plot_dim=""dim1"", ... y_holdout=""z"", ... holdout_dim=""holdout_dim1"", ... 
) """""" # Assign default values if none is provided y_hat = y if y_hat is None and isinstance(y, str) else y_hat y_forecasts = y_holdout if y_forecasts is None and isinstance(y_holdout, str) else y_forecasts # holdout_dim = plot_dim if holdout_dim is None and plot_dim is not None else holdout_dim if isinstance(y, str): y = idata.observed_data[y] if isinstance(y_holdout, str): y_holdout = idata.observed_data[y_holdout] if len(y.dims) > 1 and plot_dim is None: raise ValueError(""Argument plot_dim is needed in case of multidimensional data"") if y_holdout is not None and len(y_holdout.dims) > 1 and holdout_dim is None: raise ValueError(""Argument holdout_dim is needed in case of multidimensional data"") # Assigning values to x x_var_names = None if isinstance(x, str): x = idata.constant_data[x] elif isinstance(x, tuple): x_var_names = x x = idata.constant_data elif x is None: if plot_dim is None: x = y.coords[y.dims[0]] else: x = y.coords[plot_dim] # If posterior_predictive is present in idata and y_hat is there, get its values if isinstance(y_hat, str): if ""posterior_predictive"" not in idata.groups(): warnings.warn(""posterior_predictive not found in idata"", UserWarning) y_hat = None elif hasattr(idata.posterior_predictive, y_hat): y_hat = idata.posterior_predictive[y_hat] else: warnings.warn(""y_hat not found in posterior_predictive"", UserWarning) y_hat = None # If posterior_predictive is present in idata and y_forecasts is there, get its values x_holdout_var_names = None if isinstance(y_forecasts, str): if ""posterior_predictive"" not in idata.groups(): warnings.warn(""posterior_predictive not found in idata"", UserWarning) y_forecasts = None elif hasattr(idata.posterior_predictive, y_forecasts): y_forecasts = idata.posterior_predictive[y_forecasts] else: warnings.warn(""y_hat not found in posterior_predictive"", UserWarning) y_forecasts = None # Assign values to y_holdout if isinstance(y_holdout, str): y_holdout = idata.observed_data[y_holdout] # Assign values to x_holdout. 
if y_holdout is not None or y_forecasts is not None: if x_holdout is None: if holdout_dim is None: if y_holdout is None: x_holdout = y_forecasts.coords[y_forecasts.dims[-1]] else: x_holdout = y_holdout.coords[y_holdout.dims[-1]] else: if y_holdout is None: x_holdout = y_forecasts.coords[holdout_dim] else: x_holdout = y_holdout.coords[holdout_dim] elif isinstance(x_holdout, str): x_holdout = idata.constant_data[x_holdout] elif isinstance(x_holdout, tuple): x_holdout_var_names = x_holdout x_holdout = idata.constant_data # Choose dims to generate y plotters if plot_dim is None: skip_dims = list(y.dims) elif isinstance(plot_dim, str): skip_dims = [plot_dim] elif isinstance(plot_dim, tuple): skip_dims = list(plot_dim) # Choose dims to generate y_holdout plotters if holdout_dim is None: if y_holdout is not None: skip_holdout_dims = list(y_holdout.dims) elif y_forecasts is not None: skip_holdout_dims = list(y_forecasts.dims) elif isinstance(holdout_dim, str): skip_holdout_dims = [holdout_dim] elif isinstance(holdout_dim, tuple): skip_holdout_dims = list(holdout_dim) # Compulsory plotters y_plotters = list( xarray_var_iter( y, skip_dims=set(skip_dims), combined=True, ) ) # Compulsory plotters x_plotters = list( xarray_var_iter( x, var_names=x_var_names, skip_dims=set(x.dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs len_y = len(y_plotters) len_x = len(x_plotters) length_plotters = len_x * len_y y_plotters = np.tile(y_plotters, (len_x, 1)) x_plotters = np.tile(x_plotters, (len_y, 1)) # Generate plotters for all the available data y_mean_plotters = None y_hat_plotters = None if y_hat is not None: total_samples = y_hat.sizes[""chain""] * y_hat.sizes[""draw""] pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False) y_hat_satcked = y_hat.stack(__sample__=(""chain"", ""draw""))[..., pp_sample_ix] y_hat_plotters = list( xarray_var_iter( y_hat_satcked, skip_dims=set(skip_dims + [""__sample__""]), combined=True, ) ) y_mean = y_hat.mean((""chain"", ""draw"")) y_mean_plotters = list( xarray_var_iter( y_mean, skip_dims=set(skip_dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs y_hat_plotters = np.tile(y_hat_plotters, (len_x, 1)) y_mean_plotters = np.tile(y_mean_plotters, (len_x, 1)) y_holdout_plotters = None x_holdout_plotters = None if y_holdout is not None: y_holdout_plotters = list( xarray_var_iter( y_holdout, skip_dims=set(skip_holdout_dims), combined=True, ) ) x_holdout_plotters = list( xarray_var_iter( x_holdout, var_names=x_holdout_var_names, skip_dims=set(x_holdout.dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs y_holdout_plotters = np.tile(y_holdout_plotters, (len_x, 1)) x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1)) y_forecasts_plotters = None y_forecasts_mean_plotters = None if y_forecasts is not None: total_samples = y_forecasts.sizes[""chain""] * y_forecasts.sizes[""draw""] pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False) y_forecasts_satcked = y_forecasts.stack(__sample__=(""chain"", ""draw""))[..., pp_sample_ix] y_forecasts_plotters = list( xarray_var_iter( y_forecasts_satcked, skip_dims=set(skip_holdout_dims + [""__sample__""]), combined=True, ) ) y_forecasts_mean = y_forecasts.mean((""chain"", ""draw"")) y_forecasts_mean_plotters = list( 
xarray_var_iter( y_forecasts_mean, skip_dims=set(skip_holdout_dims), combined=True, ) ) x_holdout_plotters = list( xarray_var_iter( x_holdout, var_names=x_holdout_var_names, skip_dims=set(x_holdout.dims), combined=True, ) ) # Necessary when multidim y # If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs y_forecasts_mean_plotters = np.tile(y_forecasts_mean_plotters, (len_x, 1)) y_forecasts_plotters = np.tile(y_forecasts_plotters, (len_x, 1)) x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1)) rows, cols = default_grid(length_plotters) tsplot_kwargs = dict( x_plotters=x_plotters, y_plotters=y_plotters, y_mean_plotters=y_mean_plotters, y_hat_plotters=y_hat_plotters, y_holdout_plotters=y_holdout_plotters, x_holdout_plotters=x_holdout_plotters, y_forecasts_plotters=y_forecasts_plotters, y_forecasts_mean_plotters=y_forecasts_mean_plotters, num_samples=num_samples, length_plotters=length_plotters, rows=rows, cols=cols, backend_kwargs=backend_kwargs, y_kwargs=y_kwargs, y_hat_plot_kwargs=y_hat_plot_kwargs, y_mean_plot_kwargs=y_mean_plot_kwargs, vline_kwargs=vline_kwargs, textsize=textsize, figsize=figsize, legend=legend, axes=axes, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() plot = get_plotting_function(""plot_ts"", ""tsplot"", backend) ax = plot(**tsplot_kwargs) return ax " 58772,"def save_module(module_path, graph, lib, params, cross=None, cross_options=None): """""" Create a tarball containing the generated TVM graph, exported library and parameters Parameters ---------- module_path : str path to the target tar.gz file to be created, including the file name graph : str A JSON-serialized TVM execution graph. lib : tvm.module.Module A TVM module containing the compiled functions. params : dict The parameters (weights) for the TVM module. cross : str or callable object, optional Function that performs the actual compilation cross_options : sst of cross compilation options """""" lib_name = ""mod.so"" graph_name = ""mod.json"" param_name = ""mod.params"" temp = utils.tempdir() path_lib = temp.relpath(lib_name) if not cross: logger.debug(""exporting library to %s"", path_lib) lib.export_library(path_lib) else: logger.debug(""exporting library to %s , using cross compiler %s"", path_lib, cross) lib.export_library(path_lib, cc.cross_compiler(cross, options=cross_options.split(' '))) with open(temp.relpath(graph_name), ""w"") as graph_file: logger.debug(""writing graph to file to %s"", graph_file.name) graph_file.write(graph) with open(temp.relpath(param_name), ""wb"") as params_file: logger.debug(""writing params to file to %s"", params_file.name) params_file.write(runtime.save_param_dict(params)) logger.debug(""saving module as tar file to %s"", module_path) with tarfile.open(module_path, ""w"") as tar: tar.add(path_lib, lib_name) tar.add(temp.relpath(graph_name), graph_name) tar.add(temp.relpath(param_name), param_name) ","def save_module(module_path, graph, lib, params, cross=None, cross_options=None): """""" Create a tarball containing the generated TVM graph, exported library and parameters Parameters ---------- module_path : str path to the target tar.gz file to be created, including the file name graph : str A JSON-serialized TVM execution graph. lib : tvm.module.Module A TVM module containing the compiled functions. params : dict The parameters (weights) for the TVM module. 
cross : str or callable object, optional Function that performs the actual compilation cross_options : str, optional Command line options to be passed to the cross compiler. """""" lib_name = ""mod.so"" graph_name = ""mod.json"" param_name = ""mod.params"" temp = utils.tempdir() path_lib = temp.relpath(lib_name) if not cross: logger.debug(""exporting library to %s"", path_lib) lib.export_library(path_lib) else: logger.debug(""exporting library to %s , using cross compiler %s"", path_lib, cross) lib.export_library(path_lib, cc.cross_compiler(cross, options=cross_options.split(' '))) with open(temp.relpath(graph_name), ""w"") as graph_file: logger.debug(""writing graph to file to %s"", graph_file.name) graph_file.write(graph) with open(temp.relpath(param_name), ""wb"") as params_file: logger.debug(""writing params to file to %s"", params_file.name) params_file.write(runtime.save_param_dict(params)) logger.debug(""saving module as tar file to %s"", module_path) with tarfile.open(module_path, ""w"") as tar: tar.add(path_lib, lib_name) tar.add(temp.relpath(graph_name), graph_name) tar.add(temp.relpath(param_name), param_name) " 8229,"def read_cdf(fname): """""" Read a CDF file that follows the ISTP/IACG guidelines. Parameters ---------- fname : path-like Location of single CDF file to read. Returns ------- list[GenericTimeSeries] A list of time series objects, one for each unique time index within the CDF file. References ---------- Space Physics Guidelines for CDF https://spdf.gsfc.nasa.gov/sp_use_of_cdf.html """""" cdf = cdflib.CDF(str(fname)) # Extract the time varying variables cdf_info = cdf.cdf_info() meta = cdf.globalattsget() all_var_keys = cdf_info['rVariables'] + cdf_info['zVariables'] var_attrs = {key: cdf.varattsget(key) for key in all_var_keys} # Get keys that depend on time var_keys = [var for var in var_attrs if 'DEPEND_0' in var_attrs[var]] # Get unique time index keys time_index_keys = sorted(set([var_attrs[var]['DEPEND_0'] for var in var_keys])) all_ts = [] # For each time index, construct a GenericTimeSeries for index_key in time_index_keys: index = cdf.varget(index_key) # TODO: use to_astropy_time() instead here when we drop pandas in timeseries index = CDFepoch.to_datetime(index) df = pd.DataFrame(index=pd.DatetimeIndex(name=index_key, data=index)) units = {} for var_key in sorted(var_keys): attrs = var_attrs[var_key] if attrs['DEPEND_0'] != index_key: continue # Get data if cdf.varinq(var_key)['Last_Rec'] == -1: log.debug(f'Skipping {var_key} in {fname} as it has zero elements') continue data = cdf.varget(var_key) # Get units unit_str = attrs['UNITS'] try: unit = u.Unit(unit_str) except ValueError as e: if unit_str in _known_units: unit = _known_units[unit_str] else: raise warn_user(f'astropy did not recognize units of ""{unit_str}"". ' 'Assigning dimensionless units. ' 'If you think this unit should not be dimensionless, ' 'please raise an issue at https://github.com/sunpy/sunpy/issues') unit = u.dimensionless_unscaled if data.ndim == 2: # Multiple columns, give each column a unique label for i, col in enumerate(data.T): df[var_key + f'_{i}'] = col units[var_key + f'_{i}'] = unit else: # Single column df[var_key] = data units[var_key] = unit all_ts.append(GenericTimeSeries(data=df, units=units, meta=meta)) return all_ts ","def read_cdf(fname): """""" Read a CDF file that follows the ISTP/IACG guidelines. Parameters ---------- fname : path-like Location of single CDF file to read. 
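# Standalone aside (a sketch, not the sunpy implementation): the unit-string fallback
# used inside read_cdf above. Strings astropy cannot parse are looked up in a custom
# table and otherwise treated as dimensionless; _known_units below is a hypothetical
# stand-in for the module-level table the real code refers to.
import astropy.units as u

_known_units = {'ratio': u.dimensionless_unscaled}

def parse_unit(unit_str):
    try:
        return u.Unit(unit_str)
    except ValueError:
        return _known_units.get(unit_str, u.dimensionless_unscaled)

assert parse_unit('km / s') == u.km / u.s
assert parse_unit('definitely-not-a-unit') == u.dimensionless_unscaled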
Returns ------- list[GenericTimeSeries] A list of time series objects, one for each unique time index within the CDF file. References ---------- Space Physics Guidelines for CDF https://spdf.gsfc.nasa.gov/sp_use_of_cdf.html """""" cdf = cdflib.CDF(str(fname)) # Extract the time varying variables cdf_info = cdf.cdf_info() meta = cdf.globalattsget() all_var_keys = cdf_info['rVariables'] + cdf_info['zVariables'] var_attrs = {key: cdf.varattsget(key) for key in all_var_keys} # Get keys that depend on time var_keys = [var for var in var_attrs if 'DEPEND_0' in var_attrs[var]] # Get unique time index keys time_index_keys = sorted(set([var_attrs[var]['DEPEND_0'] for var in var_keys])) all_ts = [] # For each time index, construct a GenericTimeSeries for index_key in time_index_keys: index = cdf.varget(index_key) # TODO: use to_astropy_time() instead here when we drop pandas in timeseries index = CDFepoch.to_datetime(index) df = pd.DataFrame(index=pd.DatetimeIndex(name=index_key, data=index)) units = {} for var_key in sorted(var_keys): attrs = var_attrs[var_key] if attrs['DEPEND_0'] != index_key: continue # Get data if cdf.varinq(var_key)['Last_Rec'] == -1: log.debug(f'Skipping {var_key} in {fname} as it has zero elements') continue data = cdf.varget(var_key) # Get units unit_str = attrs['UNITS'] try: unit = u.Unit(unit_str) except ValueError as e: if unit_str in _known_units: unit = _known_units[unit_str] else: warn_user(f'astropy did not recognize units of ""{unit_str}"". ' 'Assigning dimensionless units. ' 'If you think this unit should not be dimensionless, ' 'please raise an issue at https://github.com/sunpy/sunpy/issues') unit = u.dimensionless_unscaled if data.ndim == 2: # Multiple columns, give each column a unique label for i, col in enumerate(data.T): df[var_key + f'_{i}'] = col units[var_key + f'_{i}'] = unit else: # Single column df[var_key] = data units[var_key] = unit all_ts.append(GenericTimeSeries(data=df, units=units, meta=meta)) return all_ts " 56743,"def test_fast_sample_posterior_predictive_shape_assertions(): """""" This test checks the shape assertions in pm.fast_sample_posterior_predictive. Originally reported - https://github.com/pymc-devs/pymc3/issues/4778 """""" with pm.Model(): p = pm.Beta(""p"", 2, 2) trace = pm.sample(draws=500, chains=1, return_inferencedata=True) with pm.Model() as m_forward: p = pm.Beta(""p"", 2, 2) b2 = pm.Binomial(""b2"", n=1, p=p) b3 = pm.Binomial(""b3"", n=1, p=p * b2) with m_forward: trace_forward = pm.fast_sample_posterior_predictive(trace, var_names=[""p"", ""b2"", ""b3""]) for free_rv in trace_forward.values(): assert free_rv.shape[0] == 500 ","def test_fast_sample_posterior_predictive_shape_assertions(): """""" This test checks the shape assertions in pm.fast_sample_posterior_predictive. Originally reported - https://github.com/pymc-devs/pymc3/issues/4778 """""" with pm.Model(): p = pm.Beta(""p"", 2, 2) trace = pm.sample(tune=30, draws=50, chains=1, return_inferencedata=True, compute_convergence_checks=False) with pm.Model() as m_forward: p = pm.Beta(""p"", 2, 2) b2 = pm.Binomial(""b2"", n=1, p=p) b3 = pm.Binomial(""b3"", n=1, p=p * b2) with m_forward: trace_forward = pm.fast_sample_posterior_predictive(trace, var_names=[""p"", ""b2"", ""b3""]) for free_rv in trace_forward.values(): assert free_rv.shape[0] == 50 " 3793,"def make_version_tuple(): """""" We want to expose a numeric version tuple to make it easier for dependencies to have conditional code without having to parse our version string. 
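For illustration (hypothetical version string), a value such as '5.1.0rc2' splits into ('5', '1', '0rc2') and the first run of digits in the last piece recovers the patch number:

    >>> import re
    >>> str_major, str_minor, str_patch_extra = '5.1.0rc2'.split('.')[:3]
    >>> (int(str_major), int(str_minor), int(re.findall(r'\d+', str_patch_extra)[0]))
    (5, 1, 0)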
1) It can be tricky as 1) you don't want to compare string. 2) it can be tempted to just split on dot and map convert to int, but that will fail on rc, and others. It is ok to drop all the non-numeric items as conditional code is unlikely to rely on those values. We also don't add the non-numeric elements at the end, as strings should anyway not be compared. """""" import re str_major, str_minor, str_patch_extra = short_version.split(""."")[:3] major = int(str_major) minor = int(str_minor) patch = int(re.findall(r""\d+"", str_patch_extra)[0]) return (major, minor, patch) ","def make_version_tuple() -> tuple[int, int, int]: """""" We want to expose a numeric version tuple to make it easier for dependencies to have conditional code without having to parse our version string. 1) It can be tricky as 1) you don't want to compare string. 2) it can be tempted to just split on dot and map convert to int, but that will fail on rc, and others. It is ok to drop all the non-numeric items as conditional code is unlikely to rely on those values. We also don't add the non-numeric elements at the end, as strings should anyway not be compared. """""" import re str_major, str_minor, str_patch_extra = short_version.split(""."")[:3] major = int(str_major) minor = int(str_minor) patch = int(re.findall(r""\d+"", str_patch_extra)[0]) return (major, minor, patch) " 13300,"def _project_docs(db, project_name=None, show_empty_releases=False): queries = [Release.yanked.is_(False)] if not show_empty_releases: queries.append(Release.files) releases_list = ( db.query(Release.id) .filter(*queries) .order_by( Release.project_id, Release.is_prerelease.nullslast(), Release._pypi_ordering.desc(), ) .distinct(Release.project_id) ) if project_name: releases_list = releases_list.join(Project).filter(Project.name == project_name) releases_list = releases_list.subquery() r = aliased(Release, name=""r"") all_versions = ( db.query(func.array_agg(r.version)) .filter(r.project_id == Release.project_id) .correlate(Release) .as_scalar() .label(""all_versions"") ) classifiers = ( db.query(func.array_agg(Classifier.classifier)) .select_from(release_classifiers) .join(Classifier, Classifier.id == release_classifiers.c.trove_id) .filter(Release.id == release_classifiers.c.release_id) .correlate(Release) .as_scalar() .label(""classifiers"") ) release_data = ( db.query( Description.raw.label(""description""), Release.version.label(""latest_version""), all_versions, Release.author, Release.author_email, Release.maintainer, Release.maintainer_email, Release.home_page, Release.summary, Release.keywords, Release.platform, Release.download_url, Release.created, classifiers, Project.normalized_name, Project.name, Project.zscore, ) .select_from(releases_list) .join(Release, Release.id == releases_list.c.id) .join(Description) .outerjoin(Release.project) ) for release in windowed_query(release_data, Release.project_id, 50000): p = ProjectDocument.from_db(release) p._index = None p.full_clean() doc = p.to_dict(include_meta=True) doc.pop(""_index"", None) yield doc ","def _project_docs(db, project_name=None, show_empty_releases=False): queries = [Release.yanked.is_(False)] if not show_empty_releases: queries.append(Release.files) releases_list = ( db.query(Release.id) .filter(Release.yanked.is_(False), Release.files) .order_by( Release.project_id, Release.is_prerelease.nullslast(), Release._pypi_ordering.desc(), ) .distinct(Release.project_id) ) if project_name: releases_list = releases_list.join(Project).filter(Project.name == project_name) 
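# Aside in plain Python (a sketch, not SQLAlchemy): the filter(*queries) form used in
# the first version of this function unpacks a conditionally built list of criteria,
# which is equivalent to applying each criterion in turn. The dict rows are hypothetical.
def apply_filters(rows, *predicates):
    # AND together an arbitrary number of predicate callables.
    return [row for row in rows if all(pred(row) for pred in predicates)]

queries = [lambda r: not r['yanked']]
show_empty_releases = False             # mirrors the keyword argument above
if not show_empty_releases:
    queries.append(lambda r: r['files'] > 0)

rows = [
    {'yanked': False, 'files': 2},
    {'yanked': True, 'files': 1},
    {'yanked': False, 'files': 0},
]
assert apply_filters(rows, *queries) == [{'yanked': False, 'files': 2}]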
releases_list = releases_list.subquery() r = aliased(Release, name=""r"") all_versions = ( db.query(func.array_agg(r.version)) .filter(r.project_id == Release.project_id) .correlate(Release) .as_scalar() .label(""all_versions"") ) classifiers = ( db.query(func.array_agg(Classifier.classifier)) .select_from(release_classifiers) .join(Classifier, Classifier.id == release_classifiers.c.trove_id) .filter(Release.id == release_classifiers.c.release_id) .correlate(Release) .as_scalar() .label(""classifiers"") ) release_data = ( db.query( Description.raw.label(""description""), Release.version.label(""latest_version""), all_versions, Release.author, Release.author_email, Release.maintainer, Release.maintainer_email, Release.home_page, Release.summary, Release.keywords, Release.platform, Release.download_url, Release.created, classifiers, Project.normalized_name, Project.name, Project.zscore, ) .select_from(releases_list) .join(Release, Release.id == releases_list.c.id) .join(Description) .outerjoin(Release.project) ) for release in windowed_query(release_data, Release.project_id, 50000): p = ProjectDocument.from_db(release) p._index = None p.full_clean() doc = p.to_dict(include_meta=True) doc.pop(""_index"", None) yield doc " 7607,"def __getattr__(name): """"""Get realizations using lazy import from `PEP 562 `_. Raises ------ AttributeError If ""name"" is in :mod:`astropy.cosmology.realizations` """""" if name not in realizations.available: raise AttributeError(f""module {__name__!r} has no attribute {name!r}."") return getattr(realizations, name) ","def __getattr__(name): """"""Get realizations using lazy import from `PEP 562 `_. Raises ------ AttributeError If ""name"" is not in :mod:`astropy.cosmology.realizations` """""" if name not in realizations.available: raise AttributeError(f""module {__name__!r} has no attribute {name!r}."") return getattr(realizations, name) " 30358,"def get_release_notes_draft(github_token, asset_id): """""" if possible, download current release draft from content repository in github. :param github_token: github token with push permission (in order to get the draft). :param asset_id: content build's asset id. :return: draft text (or empty string on error). """""" # Disable insecure warnings requests.packages.urllib3.disable_warnings() # pylint: disable=no-member try: res = requests.get('https://api.github.com/repos/demisto/content/releases', verify=False, # guardrails-disable-line headers={'Authorization': 'token {}'.format(github_token)}) except requests.exceptions.ConnectionError as exc: print_warning('unable to get release draft, reason:\n{}'.format(str(exc))) return '' if res.status_code != 200: print_warning('unable to get release draft ({}), reason:\n{}'.format(res.status_code, res.text)) return '' drafts = [release for release in res.json() if release.get('draft', False)] if drafts: if len(drafts) == 1: return re.sub(r'Release Notes for version .* \((\d{5,})\)', ""Release Notes for version ({})"".format(asset_id), drafts[0]['body']) print_warning('Too many drafts to choose from ({}), skipping update.'.format(len(drafts))) return '' ","def get_release_notes_draft(github_token, asset_id): """""" if possible, download current release draft from content repository in github. :param github_token: github token with push permission (in order to get the draft). :param asset_id: content build's asset id. :return: draft text (or empty string on error). 
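For illustration (hypothetical draft body and asset id), the substitution applied to the draft text behaves as:

    >>> import re
    >>> re.sub(r'Release Notes for version .* \((\d{5,})\)',
    ...        'Release Notes for version ({})'.format('12345'),
    ...        'Release Notes for version 20.5.0 (99999)')
    'Release Notes for version (12345)'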
"""""" # Disable insecure warnings requests.packages.urllib3.disable_warnings() # pylint: disable=no-member try: res = requests.get('https://api.github.com/repos/demisto/content/releases', verify=False, # guardrails-disable-line headers={'Authorization': 'token {}'.format(github_token)}) except requests.exceptions.ConnectionError as exc: print_warning('unable to get release draft, reason:\n{}'.format(str(exc))) return '' if res.status_code != 200: print_warning('unable to get release draft ({}), reason:\n{}'.format(res.status_code, res.text)) return '' drafts = [release for release in res.json() if release.get('draft', False)] if drafts: if len(drafts) == 1: return re.sub(r'Release Notes for version .* \((\d{5,})\)', 'Release Notes for version ({})'.format(asset_id), drafts[0]['body']) print_warning('Too many drafts to choose from ({}), skipping update.'.format(len(drafts))) return '' " 6075,"def getQueuesResolved(siteDict): """""" Get the list of queue descriptions merging site/ce/queue parameters and adding some derived parameters. :param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method :return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector """""" queueDict = {} for site in siteDict: for ce in siteDict[site]: ceDict = siteDict[site][ce] qDict = ceDict.pop('Queues') for queue in qDict: queueName = '%s_%s' % (ce, queue) queueDict[queueName] = qDict[queue] queueDict[queueName] = qDict[queue] queueDict[queueName]['Queue'] = queue queueDict[queueName]['Site'] = site # Evaluate the CPU limit of the queue according to the Glue convention # To Do: should be a utility if ""maxCPUTime"" in queueDict[queueName] and \ ""SI00"" in queueDict[queueName]: maxCPUTime = float(queueDict[queueName]['maxCPUTime']) # For some sites there are crazy values in the CS maxCPUTime = max(maxCPUTime, 0) maxCPUTime = min(maxCPUTime, 86400 * 12.5) si00 = float(queueDict[queueName]['SI00']) queueCPUTime = 60. / 250. * maxCPUTime * si00 queueDict[queueName]['CPUTime'] = int(queueCPUTime) # Tags & RequiredTags defined on the Queue level and on the CE level are concatenated # This also converts them from a string to a list if required. 
for tagFieldName in ('Tag', 'RequiredTag'): ceTags = ceDict.get(tagFieldName, []) if isinstance(ceTags, basestring): ceTags = fromChar(ceTags) queueTags = queueDict[queueName].get(tagFieldName) if queueTags and isinstance(queueTags, basestring): queueTags = fromChar(queueTags) queueDict[queueName][tagFieldName] = queueTags if ceTags: if queueTags: allTags = list(set(ceTags + queueTags)) queueDict[queueName][tagFieldName] = allTags else: queueDict[queueName][tagFieldName] = ceTags # Some parameters can be defined on the CE level and are inherited by all Queues for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']: queueParameter = queueDict[queueName].get(parameter) ceParameter = ceDict.get(parameter) if ceParameter or queueParameter: queueDict[queueName][parameter] = ceParameter if not queueParameter \ else queueParameter # If we have a multi-core queue add MultiProcessor tag if queueDict[queueName].get('NumberOfProcessors', 1) > 1: queueDict[queueName].setdefault('Tag', []).append('MultiProcessor') queueDict[queueName]['CEName'] = ce queueDict[queueName]['GridCE'] = ce queueDict[queueName]['CEType'] = ceDict['CEType'] queueDict[queueName]['GridMiddleware'] = ceDict['CEType'] queueDict[queueName]['QueueName'] = queue platform = '' if ""Platform"" in queueDict[queueName]: platform = queueDict[queueName]['Platform'] elif ""Platform"" in ceDict: platform = ceDict['Platform'] elif ""OS"" in ceDict: architecture = ceDict.get('architecture', 'x86_64') platform = '_'.join([architecture, ceDict['OS']]) queueDict[queueName]['Platform'] = platform if ""Platform"" not in queueDict[queueName] and platform: result = getDIRACPlatform(platform) if result['OK']: queueDict[queueName]['Platform'] = result['Value'][0] return S_OK(queueDict) ","def getQueuesResolved(siteDict): """""" Get the list of queue descriptions merging site/ce/queue parameters and adding some derived parameters. :param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method :return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector """""" queueDict = {} for site in siteDict: for ce in siteDict[site]: ceDict = siteDict[site][ce] qDict = ceDict.pop('Queues') for queue in qDict: queueName = '%s_%s' % (ce, queue) queueDict[queueName] = qDict[queue] queueDict[queueName] = qDict[queue] queueDict[queueName]['Queue'] = queue queueDict[queueName]['Site'] = site # Evaluate the CPU limit of the queue according to the Glue convention # To Do: should be a utility if ""maxCPUTime"" in queueDict[queueName] and \ ""SI00"" in queueDict[queueName]: maxCPUTime = float(queueDict[queueName]['maxCPUTime']) # For some sites there are crazy values in the CS maxCPUTime = max(maxCPUTime, 0) maxCPUTime = min(maxCPUTime, 86400 * 12.5) si00 = float(queueDict[queueName]['SI00']) queueCPUTime = 60. / 250. * maxCPUTime * si00 queueDict[queueName]['CPUTime'] = int(queueCPUTime) # Tags & RequiredTags defined on the Queue level and on the CE level are concatenated # This also converts them from a string to a list if required. 
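# Worked example (illustrative numbers) for the Glue-convention CPU limit computed a
# few lines above: maxCPUTime reads as minutes and SI00 as a SpecInt2000 rating, so
# 60 / 250 * maxCPUTime * SI00 re-expresses the limit in seconds normalised to a
# 250-SI00 reference machine.
maxCPUTime = 2880.0      # hypothetical queue limit
si00 = 2500.0            # hypothetical benchmark figure
queueCPUTime = 60. / 250. * maxCPUTime * si00
assert int(queueCPUTime) == 1728000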
for tagFieldName in ('Tag', 'RequiredTag'): ceTags = ceDict.get(tagFieldName, []) if isinstance(ceTags, basestring): ceTags = fromChar(ceTags) queueTags = queueDict[queueName].get(tagFieldName) if isinstance(queueTags, six.string_types): queueTags = fromChar(queueTags) queueDict[queueName][tagFieldName] = queueTags if ceTags: if queueTags: allTags = list(set(ceTags + queueTags)) queueDict[queueName][tagFieldName] = allTags else: queueDict[queueName][tagFieldName] = ceTags # Some parameters can be defined on the CE level and are inherited by all Queues for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']: queueParameter = queueDict[queueName].get(parameter) ceParameter = ceDict.get(parameter) if ceParameter or queueParameter: queueDict[queueName][parameter] = ceParameter if not queueParameter \ else queueParameter # If we have a multi-core queue add MultiProcessor tag if queueDict[queueName].get('NumberOfProcessors', 1) > 1: queueDict[queueName].setdefault('Tag', []).append('MultiProcessor') queueDict[queueName]['CEName'] = ce queueDict[queueName]['GridCE'] = ce queueDict[queueName]['CEType'] = ceDict['CEType'] queueDict[queueName]['GridMiddleware'] = ceDict['CEType'] queueDict[queueName]['QueueName'] = queue platform = '' if ""Platform"" in queueDict[queueName]: platform = queueDict[queueName]['Platform'] elif ""Platform"" in ceDict: platform = ceDict['Platform'] elif ""OS"" in ceDict: architecture = ceDict.get('architecture', 'x86_64') platform = '_'.join([architecture, ceDict['OS']]) queueDict[queueName]['Platform'] = platform if ""Platform"" not in queueDict[queueName] and platform: result = getDIRACPlatform(platform) if result['OK']: queueDict[queueName]['Platform'] = result['Value'][0] return S_OK(queueDict) " 31222,"def gh_get_message_command(client: Client, args: Dict[str, Any]) -> CommandResults: ghid = argToList(args.get('id')) includeheaders = args.get('includeheaders', ""false"").lower() == ""true"" showalllinks = args.get('showalllinks', ""false"").lower() == ""true"" result = client.search_events({""filters"": [{""eventId"": ghid}]}) if len(result.get(""results"", [])) > 0: message = result.get(""results"", [None])[0] envelope = { ""ID"": message.get(""eventId""), ""Received"": message.get(""timestamp""), ""Mailbox"": message.get(""origin""), ""Recipients"": message.get(""targets""), ""Subject"": message.get(""subject""), ""Display Name"": message.get(""displayName""), ""From Address"": message.get(""source""), ""From Domain"": message.get(""source"").split(""@"")[-1], ""Reply-To"": message.get(""replyTo""), ""Return-Path"": message.get(""sourcePath""), ""IP Address"": message.get(""ip""), } envelope_md = tableToMarkdown(""Message Details"", envelope, envelope.keys()) authentication = { ""SPF"": message.get(""spf""), ""DKIM"": message.get(""dkim""), ""DMARC"": message.get(""dmarc""), ""Authentication Results"": message.get(""authenticationResults"") } authentication_md = tableToMarkdown(""Message Authentication"", authentication, authentication.keys()) scores = { ""OWL"": message.get(""owlScore""), ""Sender Anomaly"": message.get(""anomalyScore""), ""Authenication Risk"": message.get(""authScore""), ""Name Spoofing"": message.get(""homographScore"") } scores_md = tableToMarkdown(""Risk Analysis Factors"", scores, scores.keys()) links = [] for link in message.get(""links""): link_dict = { ""Text"": link.get(""text""), ""Url"": link.get(""url""), ""Tags"": "", "".join(link.get(""tags"", [])) } if showalllinks: links.append(link_dict) else: if ""suspicious"" in 
link_dict['Tags'] or ""malicious"" in link_dict['Tags']: links.append(link_dict) # break if showalllinks: links_md = tableToMarkdown(""Links"", links, [""Text"", ""Url"", ""Tags""]) else: links_md = tableToMarkdown(""Suspicious/Malicious Links"", links, [""Text"", ""Url"", ""Tags""]) files = [] for file in message.get(""files""): f = { ""Name"": file.get(""fileName""), ""Type"": file.get(""fileType""), ""SHA256"": file.get(""fileHash"") } files.append(f) files_md = tableToMarkdown(""Files"", files, [""Name"", ""Type"", ""SHA256""]) policies = [] if message.get(""flag"") is not None: for policy_id in message.get(""flag""): policy = client.get_policy(policy_id).get(""policy"", {}) actions = [] for action in policy.get(""actions""): actions.append(action.get(""type"")) p = { ""ID"": policy.get(""id""), ""Name"": policy.get(""name""), ""Actions"": "","".join(actions) } policies.append(p) policies_md = tableToMarkdown(""Policies"", policies, [""ID"", ""Name"", ""Actions""]) headers = [] msgheaders = message.get(""headers"") for header in message.get(""headers"").keys(): h = { ""Name"": header, ""Value"": msgheaders[header] } headers.append(h) if includeheaders: headers_md = tableToMarkdown(""Headers"", headers, [""Name"", ""Value""]) else: headers_md = """" message_md = envelope_md + authentication_md + scores_md + links_md + files_md + policies_md + headers_md return CommandResults( readable_output=message_md, outputs_prefix='GreatHorn.Message', outputs_key_field='eventId', outputs=result.get(""results"", [None])[0] ) else: return CommandResults( readable_output=""GreatHorn event not found"", outputs={} ) ","def gh_get_message_command(client: Client, args: Dict[str, Any]) -> CommandResults: ghid = argToList(args.get('id')) includeheaders = args.get('includeheaders', ""false"").lower() == ""true"" showalllinks = args.get('showalllinks', ""false"").lower() == ""true"" result = client.search_events({""filters"": [{""eventId"": ghid}]}) if len(result.get(""results"", [])) > 0: message = result.get(""results"", [None])[0] envelope = { ""ID"": message.get(""eventId""), ""Received"": message.get(""timestamp""), ""Mailbox"": message.get(""origin""), ""Recipients"": message.get(""targets""), ""Subject"": message.get(""subject""), ""Display Name"": message.get(""displayName""), ""From Address"": message.get(""source""), ""From Domain"": message.get(""source"").split(""@"")[-1], ""Reply-To"": message.get(""replyTo""), ""Return-Path"": message.get(""sourcePath""), ""IP Address"": message.get(""ip""), } envelope_md = tableToMarkdown(""Message Details"", envelope, envelope.keys()) authentication = { ""SPF"": message.get(""spf""), ""DKIM"": message.get(""dkim""), ""DMARC"": message.get(""dmarc""), ""Authentication Results"": message.get(""authenticationResults"") } authentication_md = tableToMarkdown(""Message Authentication"", authentication, authentication.keys()) scores = { ""OWL"": message.get(""owlScore""), ""Sender Anomaly"": message.get(""anomalyScore""), ""Authenication Risk"": message.get(""authScore""), ""Name Spoofing"": message.get(""homographScore"") } scores_md = tableToMarkdown(""Risk Analysis Factors"", scores, scores.keys()) links = [] for link in message.get(""links""): link_dict = { ""Text"": link.get(""text""), ""Url"": link.get(""url""), ""Tags"": "", "".join(link.get(""tags"", [])) } if showalllinks: links.append(link_dict) else: if ""suspicious"" in link_dict['Tags'] or ""malicious"" in link_dict['Tags']: links.append(link_dict) # break if showalllinks: links_md = 
tableToMarkdown(""Links"", links, [""Text"", ""Url"", ""Tags""]) else: links_md = tableToMarkdown(""Suspicious/Malicious Links"", links, [""Text"", ""Url"", ""Tags""]) files = [] for file in message.get(""files""): f = { ""Name"": file.get(""fileName""), ""Type"": file.get(""fileType""), ""SHA256"": file.get(""fileHash"") } files.append(f) files_md = tableToMarkdown(""Files"", files, [""Name"", ""Type"", ""SHA256""]) policies = [] if message.get(""flag"") is not None: for policy_id in message.get(""flag""): policy = client.get_policy(policy_id).get(""policy"", {}) actions = [] for action in policy.get(""actions""): actions.append(action.get(""type"")) p = { ""ID"": policy.get(""id""), ""Name"": policy.get(""name""), ""Actions"": "","".join(actions) } policies.append(p) policies_md = tableToMarkdown(""Policies"", policies, [""ID"", ""Name"", ""Actions""]) headers = [] msgheaders = message.get(""headers"") for header in message.get(""headers"").keys(): h = { ""Name"": header, ""Value"": msgheaders[header] } headers.append(h) if includeheaders: headers_md = tableToMarkdown(""Headers"", headers, [""Name"", ""Value""]) else: headers_md = """" message_md = envelope_md + authentication_md + scores_md + links_md + files_md + policies_md + headers_md return CommandResults( readable_output=message_md, outputs_prefix='GreatHorn.Message', outputs_key_field='eventId', outputs=result.get(""results"")[0] ) else: return CommandResults( readable_output=""GreatHorn event not found"", outputs={} ) " 1208,"def check_arr_seq_view(seq_view, seq): assert seq_view._is_view is True assert (seq_view is not seq) is True assert (np.may_share_memory(seq_view._data, seq._data)) is True assert seq_view._offsets is not seq._offsets assert seq_view._lengths is not seq._lengths ","def check_arr_seq_view(seq_view, seq): assert seq_view._is_view is True assert seq_view is not seq assert (np.may_share_memory(seq_view._data, seq._data)) is True assert seq_view._offsets is not seq._offsets assert seq_view._lengths is not seq._lengths " 56681,"def run_solr_query(param=None, rows=100, page=1, sort=None, spellcheck_count=None, offset=None, fields=None, facet=True): param = param or {} # use page when offset is not specified if offset is None: offset = rows * (page - 1) (q_list, use_dismax) = build_q_list(param) params = [ ('fl', ','.join(fields or [ 'key', 'author_name', 'author_key', 'title', 'subtitle', 'edition_count', 'ia', 'has_fulltext', 'first_publish_year', 'cover_i', 'cover_edition_key', 'public_scan_b', 'lending_edition_s', 'lending_identifier_s', 'language', 'ia_collection_s'])), ('fq', 'type:work'), ('q.op', 'AND'), ('start', offset), ('rows', rows), ] if spellcheck_count is None: spellcheck_count = default_spellcheck_count if spellcheck_count: params.append(('spellcheck', 'true')) params.append(('spellcheck.count', spellcheck_count)) if facet: params.append(('facet', 'true')) for facet in FACET_FIELDS: params.append(('facet.field', facet)) if q_list: if use_dismax: params.append(('q', ' '.join(q_list))) params.append(('defType', 'dismax')) params.append(('qf', 'text title^10 author_name^10')) params.append(('bf', 'sqrt(edition_count)')) else: params.append(('q', ' '.join(q_list + ['_val_:""sqrt(edition_count)""^10']))) if 'public_scan' in param: v = param.pop('public_scan').lower() if v in ('true', 'false'): if v == 'false': # also constrain on print disabled since the index may not be in sync param.setdefault('print_disabled', 'false') params.append(('fq', 'public_scan_b:%s' % v)) if 'print_disabled' in param: v = 
param.pop('print_disabled').lower() if v in ('true', 'false'): minus = '-' if v == 'false' else '' params.append(('fq', '%ssubject_key:protected_daisy' % minus)) if 'has_fulltext' in param: v = param['has_fulltext'].lower() if v not in ('true', 'false'): del param['has_fulltext'] params.append(('fq', 'has_fulltext:%s' % v)) for field in FACET_FIELDS: if field == 'has_fulltext': continue if field == 'author_facet': field = 'author_key' if field not in param: continue values = param[field] params += [('fq', '%s:""%s""' % (field, val)) for val in values if val] if sort: params.append(('sort', sort)) if 'wt' in param: params.append(('wt', param.get('wt'))) url = solr_select_url + '?' + urlencode(params) response = execute_solr_query(url) solr_result = response.content if response else None # bytes or None return (solr_result, url, q_list) ","def run_solr_query(param=None, rows=100, page=1, sort=None, spellcheck_count=None, offset=None, fields=None, facet=True): param = param or {} # use page when offset is not specified if offset is None: offset = rows * (page - 1) (q_list, use_dismax) = build_q_list(param) params = [ ('fl', ','.join(fields or [ 'key', 'author_name', 'author_key', 'title', 'subtitle', 'edition_count', 'ia', 'has_fulltext', 'first_publish_year', 'cover_i', 'cover_edition_key', 'public_scan_b', 'lending_edition_s', 'lending_identifier_s', 'language', 'ia_collection_s'])), ('fq', 'type:work'), ('q.op', 'AND'), ('start', offset), ('rows', rows), ] if spellcheck_count is None: spellcheck_count = default_spellcheck_count if spellcheck_count: params.append(('spellcheck', 'true')) params.append(('spellcheck.count', spellcheck_count)) if facet: params.append(('facet', 'true')) for facet in FACET_FIELDS: params.append(('facet.field', facet)) if q_list: if use_dismax: params.append(('q', ' '.join(q_list))) params.append(('defType', 'dismax')) params.append(('qf', 'text title^20 author_name^20')) params.append(('bf', 'min(100,edition_count)')) else: params.append(('q', ' '.join(q_list + ['_val_:""sqrt(edition_count)""^10']))) if 'public_scan' in param: v = param.pop('public_scan').lower() if v in ('true', 'false'): if v == 'false': # also constrain on print disabled since the index may not be in sync param.setdefault('print_disabled', 'false') params.append(('fq', 'public_scan_b:%s' % v)) if 'print_disabled' in param: v = param.pop('print_disabled').lower() if v in ('true', 'false'): minus = '-' if v == 'false' else '' params.append(('fq', '%ssubject_key:protected_daisy' % minus)) if 'has_fulltext' in param: v = param['has_fulltext'].lower() if v not in ('true', 'false'): del param['has_fulltext'] params.append(('fq', 'has_fulltext:%s' % v)) for field in FACET_FIELDS: if field == 'has_fulltext': continue if field == 'author_facet': field = 'author_key' if field not in param: continue values = param[field] params += [('fq', '%s:""%s""' % (field, val)) for val in values if val] if sort: params.append(('sort', sort)) if 'wt' in param: params.append(('wt', param.get('wt'))) url = solr_select_url + '?' + urlencode(params) response = execute_solr_query(url) solr_result = response.content if response else None # bytes or None return (solr_result, url, q_list) " 45135,"def flag_is_enabled( flag_name: str, default=False, client: FeatureFlagClient = None, **conditions: Optional[Any] ): """""" Check if a feature flag is enabled. This function always returns False if the setting PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING is false. 
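For illustration (hypothetical flag name), a call such as ``flag_is_enabled('new-dashboard', is_admin=True)`` returns False immediately while flagging is disabled, and otherwise defers to the feature-flag client's ``is_enabled`` check with the given conditions.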
NOTE: If `flag_is_enabled()` is called for a feature that has conditions, but the caller does not give any conditions, the current state of the flag is returned. Args: flag_name: the name of the feature flag default: the default return value to use if no feature flag with the given name exists. Defaults to False. client: The FeatureFlagClient instance to use. Defaults to a client configured to look at an in-memory feature store. conditions: keyword arguments, e.g. is_admin=True, to check against any Conditions on the flag Returns: bool: whether the flag is enabled """""" if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value(): return False if not client: client = get_features_client() return client.is_enabled(flag_name, default=default, **conditions) ","def flag_is_enabled( flag_name: str, default=False, client: FeatureFlagClient = None, **conditions: Optional[Any] ): """""" Check if a feature flag is enabled. This function always returns False if the setting `PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING` is false. NOTE: If `flag_is_enabled()` is called for a feature that has conditions, but the caller does not give any conditions, the current state of the flag is returned. Args: flag_name: the name of the feature flag default: the default return value to use if no feature flag with the given name exists. Defaults to False. client: The FeatureFlagClient instance to use. Defaults to a client configured to look at an in-memory feature store. conditions: keyword arguments, e.g. is_admin=True, to check against any Conditions on the flag Returns: bool: whether the flag is enabled """""" if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value(): return False if not client: client = get_features_client() return client.is_enabled(flag_name, default=default, **conditions) " 29267,"def validate_topic_and_sub_topic_change(obj): """"""Validates Topic or Sub topic change. Args: obj: dict. Data that needs to be validated. """""" allowed_commands = [ command['name'] for command in topic_domain.ALLOWED_COMMANDS ] if obj['cmd'] not in allowed_commands: raise base.BaseHandler.InvalidInputException( '%s cmd is not allowed.' % obj['cmd'] ) ","def validate_topic_and_sub_topic_change(obj): """"""Validates Topic or Subtopic change. Args: obj: dict. Data that needs to be validated. """""" allowed_commands = [ command['name'] for command in topic_domain.ALLOWED_COMMANDS ] if obj['cmd'] not in allowed_commands: raise base.BaseHandler.InvalidInputException( '%s cmd is not allowed.' 
% obj['cmd'] ) " 3455,"def delete_groups( request: Request, projects: Sequence[""Project""], organization_id: int, search_fn: SearchFunction, ) -> Response: """""" `search_fn` refers to the `search.query` method with the appropriate project, org, environment, and search params already bound """""" group_ids = request.GET.getlist(""id"") if group_ids: group_list = list( Group.objects.filter( project__in=projects, project__organization_id=organization_id, id__in=set(group_ids), ).exclude(status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS]) ) else: try: cursor_result, _ = search_fn( { ""limit"": BULK_MUTATION_LIMIT, ""paginator_options"": {""max_limit"": BULK_MUTATION_LIMIT}, } ) except ValidationError as exc: return Response({""detail"": str(exc)}, status=400) group_list = list(cursor_result) if not group_list: return Response(status=204) if any([group.issue_category == GroupCategory.PERFORMANCE for group in group_list]): return Response({""detail"": ""Cannot delete performance issues.""}, status=403) groups_by_project_id = defaultdict(list) for group in group_list: groups_by_project_id[group.project_id].append(group) for project in projects: delete_group_list( request, project, groups_by_project_id.get(project.id, []), delete_type=""delete"" ) return Response(status=204) ","def delete_groups( request: Request, projects: Sequence[""Project""], organization_id: int, search_fn: SearchFunction, ) -> Response: """""" `search_fn` refers to the `search.query` method with the appropriate project, org, environment, and search params already bound """""" group_ids = request.GET.getlist(""id"") if group_ids: group_list = list( Group.objects.filter( project__in=projects, project__organization_id=organization_id, id__in=set(group_ids), ).exclude(status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS]) ) else: try: cursor_result, _ = search_fn( { ""limit"": BULK_MUTATION_LIMIT, ""paginator_options"": {""max_limit"": BULK_MUTATION_LIMIT}, } ) except ValidationError as exc: return Response({""detail"": str(exc)}, status=400) group_list = list(cursor_result) if not group_list: return Response(status=204) if any([group.issue_category == GroupCategory.PERFORMANCE for group in group_list]): raise rest_framework.exceptions.PermissionDenied(detail=""Cannot delete performance issues."") groups_by_project_id = defaultdict(list) for group in group_list: groups_by_project_id[group.project_id].append(group) for project in projects: delete_group_list( request, project, groups_by_project_id.get(project.id, []), delete_type=""delete"" ) return Response(status=204) " 10784,"def jit(func_or_sig=None, argtypes=None, device=False, inline=False, bind=True, link=[], debug=None, **kws): """""" JIT compile a python function conforming to the CUDA Python specification. If a signature is supplied, then a function is returned that takes a function to compile. :param func_or_sig: A function to JIT compile, or a signature of a function to compile. If a function is supplied, then an :class:`numba.cuda.compiler.AutoJitCUDAKernel` is returned. If a signature is supplied, then a function which takes a function to compile and returns an :class:`numba.cuda.compiler.AutoJitCUDAKernel` is returned. .. note:: A kernel cannot have any return value. :param device: Indicates whether this is a device function. 
:type device: bool :param bind: Force binding to CUDA context immediately :type bind: bool :param link: A list of files containing PTX source to link with the function :type link: list :param debug: If True, check for exceptions thrown when executing the kernel. Since this degrades performance, this should only be used for debugging purposes. Defaults to False. (The default value can be overridden by setting environment variable ``NUMBA_CUDA_DEBUGINFO=1``.) :param fastmath: If true, enables flush-to-zero and fused-multiply-add, disables precise division and square root. This parameter has no effect on device function, whose fastmath setting depends on the kernel function from which they are called. :param max_registers: Limit the kernel to using at most this number of registers per thread. Useful for increasing occupancy. """""" debug = config.CUDA_DEBUGINFO_DEFAULT if debug is None else debug if link and config.ENABLE_CUDASIM: raise NotImplementedError('Cannot link PTX in the simulator') if 'boundscheck' in kws: raise NotImplementedError(""bounds checking is not supported for CUDA"") fastmath = kws.get('fastmath', False) if argtypes is None and not sigutils.is_signature(func_or_sig): if func_or_sig is None: if config.ENABLE_CUDASIM: def autojitwrapper(func): return FakeCUDAKernel(func, device=device, fastmath=fastmath, debug=debug) else: def autojitwrapper(func): return jit(func, device=device, bind=bind, debug=debug, **kws) return autojitwrapper # func_or_sig is a function else: if config.ENABLE_CUDASIM: return FakeCUDAKernel(func_or_sig, device=device, fastmath=fastmath, debug=debug) elif device: return jitdevice(func_or_sig, debug=debug, **kws) else: targetoptions = kws.copy() targetoptions['debug'] = debug return AutoJitCUDAKernel(func_or_sig, bind=bind, targetoptions=targetoptions) else: if config.ENABLE_CUDASIM: def jitwrapper(func): return FakeCUDAKernel(func, device=device, fastmath=fastmath, debug=debug) return jitwrapper restype, argtypes = convert_types(func_or_sig, argtypes) if restype and not device and restype != types.void: raise TypeError(""CUDA kernel must have void return type."") def kernel_jit(func): kernel = compile_kernel(func, argtypes, link=link, debug=debug, inline=inline, fastmath=fastmath) # Force compilation for the current context if bind: kernel.bind() return kernel def device_jit(func): return compile_device(func, restype, argtypes, inline=inline, debug=debug) if device: return device_jit else: return kernel_jit ","def jit(func_or_sig=None, argtypes=None, device=False, inline=False, bind=True, link=[], debug=None, **kws): """""" JIT compile a python function conforming to the CUDA Python specification. If a signature is supplied, then a function is returned that takes a function to compile. :param func_or_sig: A function to JIT compile, or a signature of a function to compile. If a function is supplied, then a :class:`numba.cuda.compiler.AutoJitCUDAKernel` is returned. If a signature is supplied, then a function which takes a function to compile and returns an :class:`numba.cuda.compiler.AutoJitCUDAKernel` is returned. .. note:: A kernel cannot have any return value. :param device: Indicates whether this is a device function. :type device: bool :param bind: Force binding to CUDA context immediately :type bind: bool :param link: A list of files containing PTX source to link with the function :type link: list :param debug: If True, check for exceptions thrown when executing the kernel. 
Since this degrades performance, this should only be used for debugging purposes. Defaults to False. (The default value can be overridden by setting environment variable ``NUMBA_CUDA_DEBUGINFO=1``.) :param fastmath: If true, enables flush-to-zero and fused-multiply-add, disables precise division and square root. This parameter has no effect on device function, whose fastmath setting depends on the kernel function from which they are called. :param max_registers: Limit the kernel to using at most this number of registers per thread. Useful for increasing occupancy. """""" debug = config.CUDA_DEBUGINFO_DEFAULT if debug is None else debug if link and config.ENABLE_CUDASIM: raise NotImplementedError('Cannot link PTX in the simulator') if 'boundscheck' in kws: raise NotImplementedError(""bounds checking is not supported for CUDA"") fastmath = kws.get('fastmath', False) if argtypes is None and not sigutils.is_signature(func_or_sig): if func_or_sig is None: if config.ENABLE_CUDASIM: def autojitwrapper(func): return FakeCUDAKernel(func, device=device, fastmath=fastmath, debug=debug) else: def autojitwrapper(func): return jit(func, device=device, bind=bind, debug=debug, **kws) return autojitwrapper # func_or_sig is a function else: if config.ENABLE_CUDASIM: return FakeCUDAKernel(func_or_sig, device=device, fastmath=fastmath, debug=debug) elif device: return jitdevice(func_or_sig, debug=debug, **kws) else: targetoptions = kws.copy() targetoptions['debug'] = debug return AutoJitCUDAKernel(func_or_sig, bind=bind, targetoptions=targetoptions) else: if config.ENABLE_CUDASIM: def jitwrapper(func): return FakeCUDAKernel(func, device=device, fastmath=fastmath, debug=debug) return jitwrapper restype, argtypes = convert_types(func_or_sig, argtypes) if restype and not device and restype != types.void: raise TypeError(""CUDA kernel must have void return type."") def kernel_jit(func): kernel = compile_kernel(func, argtypes, link=link, debug=debug, inline=inline, fastmath=fastmath) # Force compilation for the current context if bind: kernel.bind() return kernel def device_jit(func): return compile_device(func, restype, argtypes, inline=inline, debug=debug) if device: return device_jit else: return kernel_jit " 22085,"def _ntlm_authenticate_info(request): """""" Extract host information in an NTLM_AUTH message """""" if (len(request) < 52): LOGGER.warning(""NTLM message is too short (%d) but should be at least "" ""52 char long"", len(request)) return None value = [] offset, ln = struct.unpack('IH', request[32:36] + request[28:30]) if ln > 0: value.append(""domain:"" + \ encode_b64(_extract_substr(request, offset, ln)).decode()) has_version = False # Flags are not present in a NTLM_AUTH message when the data block starts # before index 64 if offset >= 64 and len(request) > 64: flags, = struct.unpack('I', request[60:64]) has_version = flags & flag_version off, ln = struct.unpack('IH', request[40:44] + request[36:38]) if ln > 0: value.append(""user-name:"" + \ encode_b64(_extract_substr(request, off, ln)).decode()) off, ln = struct.unpack('IH', request[48:52] + request[44:46]) if ln > 0: value.append(""workstation:"" + \ encode_b64(_extract_substr(request, off, ln)).decode()) # Get OS Version if the `Negotiate Version` is set # (NTLM_AUTH messages with a data block starting before index 72 do not # contain information on the version) if offset >= 72 and len(request) > 72 and has_version: maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] + request[65:66] + request[66:68] + request[71:72]) version = 
""{}.{}.{}"".format(maj, minor, bld).encode() value.append(""ntlm-os:{}"".format(encode_b64(version).decode())) value.append(""ntlm-version:{}"".format(ntlm_ver)) return 'NTLM ' + ','.join(value) ","def _ntlm_authenticate_info(request): """""" Extract host information in an NTLM_AUTH message """""" if (len(request) < 52): LOGGER.warning(""NTLM message is too short (%d) but should be at least "" ""52 char long"", len(request)) return None value = [] offset, ln = struct.unpack('IH', request[32:36] + request[28:30]) if ln > 0: value.append(""domain:"" + \ encode_b64(_extract_substr(request, offset, ln)).decode()) has_version = False # Flags are not present in a NTLM_AUTH message when the data block starts # before index 64 if offset >= 64 and len(request) > 64: flags, = struct.unpack('I', request[60:64]) has_version = flags & flag_version off, ln = struct.unpack('IH', request[40:44] + request[36:38]) if ln > 0: value.append(""user-name:"" + \ encode_b64(_extract_substr(request, off, ln)).decode()) off, ln = struct.unpack('IH', request[48:52] + request[44:46]) if ln: value.append(""workstation:"" + \ encode_b64(_extract_substr(request, off, ln)).decode()) # Get OS Version if the `Negotiate Version` is set # (NTLM_AUTH messages with a data block starting before index 72 do not # contain information on the version) if offset >= 72 and len(request) > 72 and has_version: maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] + request[65:66] + request[66:68] + request[71:72]) version = ""{}.{}.{}"".format(maj, minor, bld).encode() value.append(""ntlm-os:{}"".format(encode_b64(version).decode())) value.append(""ntlm-version:{}"".format(ntlm_ver)) return 'NTLM ' + ','.join(value) " 7229,"def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r""""""Measure properties of labeled image regions. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions ` for more details. .. deprecated:: 0.16.0 Use ""rc"" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. **bbox_area** : int Number of pixels of bounding box. 
**centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. **image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. **local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. **major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. 
**weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. **weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. [4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.72987986048314, 81.91228523446583) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.72987986048314, 81.91228523446583) """""" if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): if np.issubdtype(label_image.dtype, np.bool_): raise TypeError( 'Non-integer image types are ambiguous: ' 'use skimage.measure.label to label the connected' 'components of label_image,' 'or label_image.astype(np.uint8) to interpret' 'the True values as a single label.') else: raise TypeError( 'Non-integer label_image types are ambiguous') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates=""rc""` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than ""rc"" for the ""coordinates"" argument ' 'to skimage.measure.regionprops are no longer supported. 
' 'You should update your code to use ""rc"" coordinates and ' 'stop using the ""coordinates"" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions ","def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r""""""Measure properties of labeled image regions. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions ` for more details. .. deprecated:: 0.16.0 Use ""rc"" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. **bbox_area** : int Number of pixels of bounding box. **centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. 
**image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. **local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. **major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. **weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. **weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. 
[1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. [4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.72987986048314, 81.91228523446583) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.72987986048314, 81.91228523446583) """""" if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): if label_image.dtype.kind == 'b': raise TypeError( 'Non-integer image types are ambiguous: ' 'use skimage.measure.label to label the connected' 'components of label_image,' 'or label_image.astype(np.uint8) to interpret' 'the True values as a single label.') else: raise TypeError( 'Non-integer label_image types are ambiguous') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates=""rc""` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than ""rc"" for the ""coordinates"" argument ' 'to skimage.measure.regionprops are no longer supported. 
' 'You should update your code to use ""rc"" coordinates and ' 'stop using the ""coordinates"" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions " 38650,"def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) 
select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List the selected checks' ) action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List the selected checks providing details for each test' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) # Run options run_options.add_argument( '-A', '--account', action='store', help='Use ACCOUNT for submitting jobs (Slurm)' ) run_options.add_argument( '-P', '--partition', action='store', metavar='PART', help='Use PART for submitting jobs (Slurm/PBS/Torque)' ) run_options.add_argument( '--reservation', action='store', metavar='RES', help='Use RES for submitting jobs (Slurm)' ) run_options.add_argument( '--nodelist', action='store', help='Run checks on the selected list of nodes (Slurm)' ) run_options.add_argument( '--exclude-nodes', action='store', metavar='NODELIST', help='Exclude the list of nodes from running checks (Slurm)' ) run_options.add_argument( '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) run_options.add_argument( '--flex-alloc-tasks', action='store', dest='flex_alloc_tasks', metavar='{all|idle|NUM}', default=None, help='*deprecated*, please use --flex-alloc-nodes instead' ) run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|idle|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' 
) env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '-V', '--version', action='version', version=os_ext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_SERVER', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # Parse command line options = argparser.parse_args() # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) # Now configure ReFrame according to the user configuration file try: try: site_config = config.load_config(options.config_file) except ReframeDeprecationWarning as e: 
printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() site_config.select_subconfig(options.system) for err in options.update_config(site_config): printer.warning(str(err)) logging.configure_logging(site_config) except (OSError, ConfigError) as e: printer.error(f'failed to load configuration: {e}') sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: runtime.init_runtime(site_config) except ConfigError as e: printer.error(f'failed to initialize runtime: {e}') sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if options.mode: try: mode_args = rt.get_option(f'modes/@{options.mode}/options') # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(rt.site_config) except ConfigError as e: printer.error('could not obtain execution mode: %s' % e) sys.exit(1) if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), recurse=site_config.get('general/0/check_search_recursive'), ignore_conflicts=site_config.get('general/0/ignore_check_conflicts') ) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', os_ext.reframe_version()) print_infoline('command', repr(' '.join(sys.argv))) print_infoline('launched by', f""{os_ext.osuser() or ''}@{socket.gethostname()}"") print_infoline('working directory', repr(os.getcwd())) print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(rt.stage_prefix)) print_infoline('output directory', repr(rt.output_prefix)) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() except OSError as e: raise ReframeError from e # Filter checks by name checks_matched = checks_found if options.exclude_names: for name in options.exclude_names: checks_matched = filter(filters.have_not_name(name), checks_matched) if options.names: checks_matched = filter(filters.have_name('|'.join(options.names)), 
checks_matched) # Filter checks by tags for tag in options.tags: checks_matched = filter(filters.have_tag(tag), checks_matched) # Filter checks by prgenv if not options.skip_prgenv_check: for prgenv in options.prgenv: checks_matched = filter(filters.have_prgenv(prgenv), checks_matched) # Filter checks by system if not options.skip_system_check: checks_matched = filter( filters.have_partition(rt.system.partitions), checks_matched) # Filter checks further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: checks_matched = filter(filters.have_gpu_only(), checks_matched) elif options.cpu_only: checks_matched = filter(filters.have_cpu_only(), checks_matched) # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} # Generate the test cases, validate dependencies and sort them checks_matched = list(checks_matched) testcases = generate_testcases(checks_matched, options.skip_system_check, options.skip_prgenv_check, allowed_environs) testgraph = dependency.build_deps(testcases) dependency.validate_deps(testgraph) testcases = dependency.toposort(testgraph) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(m) # Load the environment for the current system try: runtime.loadenv(rt.system.preload_environ) except EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(m, force=True) except EnvironError as e: printer.warning(""could not load module '%s' correctly: "" ""Skipping..."" % m) printer.debug(str(e)) if options.flex_alloc_tasks: printer.warning(""`--flex-alloc-tasks' is deprecated and "" ""will be removed in the future; "" ""you should use --flex-alloc-nodes instead"") options.flex_alloc_nodes = (options.flex_alloc_nodes or options.flex_alloc_tasks) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Act on checks success = True if options.list: # List matched checks list_checks(list(checks_matched), printer) elif options.list_detailed: # List matched checks with details list_checks(list(checks_matched), printer, detailed=True) elif options.run: # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise ConfigError(errmsg.format(options.flex_alloc_nodes)) except ValueError: if not options.flex_alloc_nodes.casefold() in {'idle', 'all'}: raise ConfigError( 
errmsg.format(options.flex_alloc_nodes)) from None sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes exec_policy.flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_account = options.account exec_policy.sched_partition = options.partition exec_policy.sched_reservation = options.reservation exec_policy.sched_nodelist = options.nodelist exec_policy.sched_exclude_nodelist = options.exclude_nodes exec_policy.sched_options = options.job_options try: max_retries = int(options.max_retries) except ValueError: raise ConfigError('--max-retries is not a valid integer: %s' % max_retries) from None runner = Runner(exec_policy, printer, max_retries) try: runner.runall(testcases) finally: # Print a retry report if we did any retries if runner.stats.failures(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run if runner.stats.failures(): printer.info(runner.stats.failure_report()) success = False if options.failure_stats: printer.info(runner.stats.failure_stats()) if options.performance_report: printer.info(runner.stats.performance_report()) else: printer.error(""No action specified. Please specify `-l'/`-L' for "" ""listing or `-r' for running. "" ""Try `%s -h' for more options."" % argparser.prog) sys.exit(1) if not success: sys.exit(1) sys.exit(0) except KeyboardInterrupt: sys.exit(1) except ReframeError as e: printer.error(str(e)) sys.exit(1) except (Exception, ReframeFatalError): printer.error(format_exception(*sys.exc_info())) sys.exit(1) finally: try: if site_config.get('general/0/save_log_files'): logging.save_log_files(rt.output_prefix) except OSError as e: printer.error('could not save log file: %s' % e) sys.exit(1) ","def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', 
action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List the selected checks' ) action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List the selected checks providing details for each test' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) # Run options run_options.add_argument( '-A', '--account', action='store', help='Use ACCOUNT for submitting jobs (Slurm)' ) run_options.add_argument( '-P', '--partition', action='store', metavar='PART', help='Use PART for submitting jobs (Slurm/PBS/Torque)' ) run_options.add_argument( '--reservation', action='store', metavar='RES', help='Use RES for submitting jobs (Slurm)' ) run_options.add_argument( '--nodelist', action='store', help='Run checks on the selected list of nodes (Slurm)' ) run_options.add_argument( '--exclude-nodes', action='store', metavar='NODELIST', help='Exclude the list of nodes from running checks (Slurm)' ) run_options.add_argument( '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) 
run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) run_options.add_argument( '--flex-alloc-tasks', action='store', dest='flex_alloc_tasks', metavar='{all|idle|NUM}', default=None, help='*deprecated*, please use --flex-alloc-nodes instead' ) run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|idle|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' ) env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '-V', '--version', action='version', version=os_ext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_SERVER', 
configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # Parse command line options = argparser.parse_args() # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) # Now configure ReFrame according to the user configuration file try: try: site_config = config.load_config(options.config_file) except ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() site_config.select_subconfig(options.system) for err in options.update_config(site_config): printer.warning(str(err)) logging.configure_logging(site_config) except (OSError, ConfigError) as e: printer.error(f'failed to load configuration: {e}') sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: runtime.init_runtime(site_config) except ConfigError as e: printer.error(f'failed to initialize runtime: {e}') sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if options.mode: try: mode_args = rt.get_option(f'modes/@{options.mode}/options') # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(rt.site_config) except ConfigError as e: printer.error('could not obtain execution mode: %s' % e) sys.exit(1) if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), recurse=site_config.get('general/0/check_search_recursive'), ignore_conflicts=site_config.get('general/0/ignore_check_conflicts') ) def print_infoline(param, value): param = param + ':' printer.info(f"" 
{param.ljust(18)} {value}"") # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', os_ext.reframe_version()) print_infoline('command', repr(' '.join(sys.argv))) print_infoline('launched by', f""{os_ext.osuser() or ''}@{socket.gethostname()}"") print_infoline('working directory', repr(os.getcwd())) print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(rt.stage_prefix)) print_infoline('output directory', repr(rt.output_prefix)) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() except OSError as e: raise ReframeError from e # Filter checks by name checks_matched = checks_found if options.exclude_names: for name in options.exclude_names: checks_matched = filter(filters.have_not_name(name), checks_matched) if options.names: checks_matched = filter(filters.have_name('|'.join(options.names)), checks_matched) # Filter checks by tags for tag in options.tags: checks_matched = filter(filters.have_tag(tag), checks_matched) # Filter checks by prgenv if not options.skip_prgenv_check: for prgenv in options.prgenv: checks_matched = filter(filters.have_prgenv(prgenv), checks_matched) # Filter checks by system if not options.skip_system_check: checks_matched = filter( filters.have_partition(rt.system.partitions), checks_matched) # Filter checks further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: checks_matched = filter(filters.have_gpu_only(), checks_matched) elif options.cpu_only: checks_matched = filter(filters.have_cpu_only(), checks_matched) # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} # Generate the test cases, validate dependencies and sort them checks_matched = list(checks_matched) testcases = generate_testcases(checks_matched, options.skip_system_check, options.skip_prgenv_check, allowed_environs) testgraph = dependency.build_deps(testcases) dependency.validate_deps(testgraph) testcases = dependency.toposort(testgraph) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(m) # Load the environment for the current system try: runtime.loadenv(rt.system.preload_environ) except EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(m, force=True) except EnvironError as e: printer.warning(""could not load module '%s' correctly: "" ""Skipping..."" % m) printer.debug(str(e)) if options.flex_alloc_tasks: printer.warning(""`--flex-alloc-tasks' is deprecated and "" ""will be removed in the future; "" ""you should use --flex-alloc-nodes instead"") options.flex_alloc_nodes = (options.flex_alloc_nodes or options.flex_alloc_tasks) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Act on checks success = True if options.list: # List matched checks list_checks(list(checks_matched), printer) elif options.list_detailed: # List matched checks with details list_checks(list(checks_matched), printer, detailed=True) elif options.run: # Setup the execution policy if 
options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise ConfigError(errmsg.format(options.flex_alloc_nodes)) except ValueError: if not options.flex_alloc_nodes.casefold() in {'idle', 'all'}: raise ConfigError( errmsg.format(options.flex_alloc_nodes)) from None sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes exec_policy.flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_account = options.account exec_policy.sched_partition = options.partition exec_policy.sched_reservation = options.reservation exec_policy.sched_nodelist = options.nodelist exec_policy.sched_exclude_nodelist = options.exclude_nodes exec_policy.sched_options = options.job_options try: max_retries = int(options.max_retries) except ValueError: raise ConfigError('--max-retries is not a valid integer: %s' % max_retries) from None runner = Runner(exec_policy, printer, max_retries) try: runner.runall(testcases) finally: # Print a retry report if we did any retries if runner.stats.failures(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run if runner.stats.failures(): printer.info(runner.stats.failure_report()) success = False if options.failure_stats: printer.info(runner.stats.failure_stats()) if options.performance_report: printer.info(runner.stats.performance_report()) else: printer.error(""No action specified. Please specify `-l'/`-L' for "" ""listing or `-r' for running. "" ""Try `%s -h' for more options."" % argparser.prog) sys.exit(1) if not success: sys.exit(1) sys.exit(0) except KeyboardInterrupt: sys.exit(1) except ReframeError as e: printer.error(str(e)) sys.exit(1) except (Exception, ReframeFatalError): printer.error(format_exception(*sys.exc_info())) sys.exit(1) finally: try: if site_config.get('general/0/save_log_files'): logging.save_log_files(rt.output_prefix) except OSError as e: printer.error('could not save log file: %s' % e) sys.exit(1) " 59396,"def _generate_latex_source(circuit, filename=None, basis=""id,u0,u1,u2,u3,x,y,z,h,s,sdg,t,tdg,rx,ry,rz,"" ""cx,cy,cz,ch,crz,cu1,cu3,swap,ccx,cswap"", scale=0.7, style=None, reverse_bits=False, plot_barriers=True): """"""Convert QuantumCircuit to LaTeX string. Args: circuit (QuantumCircuit): input circuit scale (float): image scaling filename (str): optional filename to write latex basis (str): optional comma-separated list of gate names style (dict or str): dictionary of style or file name of style file reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. Returns: str: Latex string appropriate for writing to file. 
"""""" dag_circuit = DAGCircuit.fromQuantumCircuit(circuit, expand_gates=False) qregs, cregs, ops = _utils._get_instructions(dag_circuit, reversebits=reverse_bits) qcimg = _latex.QCircuitImage(qregs, cregs, ops, scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits) latex = qcimg.latex() if filename: with open(filename, 'w') as latex_file: latex_file.write(latex) return latex ","def _generate_latex_source(circuit, filename=None, basis=""id,u0,u1,u2,u3,x,y,z,h,s,sdg,t,tdg,rx,ry,rz,"" ""cx,cy,cz,ch,crz,cu1,cu3,swap,ccx,cswap"", scale=0.7, style=None, reverse_bits=False, plot_barriers=True): """"""Convert QuantumCircuit to LaTeX string. # Deprecate the following lines -------------------------------------------------| if basis is None: # | basis = (""id,u0,u1,u2,u3,x,y,z,h,s,sdg,t,tdg,rx,ry,rz,"" # | ""cx,cy,cz,ch,crz,cu1,cu3,swap,ccx,cswap"") # | else: # | warnings.warn('The basis kwarg is deprecated and the circuit drawer ' # | 'function will not be able to adjust basis gates itself ' # | 'in a future release', DeprecationWarning) # | basis = basis.split(',') if basis else [] # | dag = _dagunroller.DagUnroller(dag, _dagbackend.DAGBackend(basis)).expand_gates() # | # -------------------------------------------------------------------------------| Args: circuit (QuantumCircuit): input circuit scale (float): image scaling filename (str): optional filename to write latex basis (str): optional comma-separated list of gate names style (dict or str): dictionary of style or file name of style file reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. Returns: str: Latex string appropriate for writing to file. """""" dag_circuit = DAGCircuit.fromQuantumCircuit(circuit, expand_gates=False) qregs, cregs, ops = _utils._get_instructions(dag_circuit, reversebits=reverse_bits) qcimg = _latex.QCircuitImage(qregs, cregs, ops, scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits) latex = qcimg.latex() if filename: with open(filename, 'w') as latex_file: latex_file.write(latex) return latex " 11558,"def record_search_query_params(params, separate_replies): """""" Send search params to New Relic as ""attributes"" on each transaction. Only the first 255 characters of the value are retained and values must be str, int, float, or bool types. Disclaimer: If there are multiple values for a single key, only submit the first value to New Relic as there is no way of submitting multiple values for a single attribute. :arg params: the request params to record :type params: webob.multidict.MultiDict :arg separate_replies: the value of the separate_replies search setting :type separate_replies: bool """""" keys = [ # Record usage of inefficient offset and it's alternative search_after. ""offset"", ""search_after"", ""sort"", # Record usage of url/uri (url is an alias of uri). ""url"", ""uri"", # Record usage of tags/tag (tags is an alias of tag). ""tags"", ""tag"", # Record group and user-these help in identifying slow queries. ""group"", ""user"", # Record usage of wildcard feature. ""wildcard_uri"", ] # The New Relic Query Language does not permit _ at the begining # and offset is a reserved key word. params = [(f""es_{k}"", params[k]) for k in keys if k in params] # Record usage of _separate_replies which will help distinguish client calls # for loading the sidebar annotations from other API calls. 
if separate_replies: params.append((""es__separate_replies"", separate_replies)) newrelic.agent.add_custom_parameters(params) ","def record_search_query_params(params, separate_replies): """""" Send search params to New Relic as ""attributes"" on each transaction. Only the first 255 characters of the value are retained and values must be str, int, float, or bool types. Disclaimer: If there are multiple values for a single key, only submit the first value to New Relic as there is no way of submitting multiple values for a single attribute. :arg params: the request params to record :type params: webob.multidict.MultiDict :arg separate_replies: the value of the separate_replies search setting :type separate_replies: bool """""" keys = [ # Record usage of inefficient offset and it's alternative search_after. ""offset"", ""search_after"", ""sort"", # Record usage of url/uri (url is an alias of uri). ""url"", ""uri"", # Record usage of tags/tag (tags is an alias of tag). ""tags"", ""tag"", # Record group and user-these help in identifying slow queries. ""group"", ""user"", # Record usage of wildcard feature. ""wildcard_uri"", ] # The New Relic Query Language does not permit _ at the begining # and offset is a reserved key word. params = [(f""es_{key}"", params[key]) for key in keys if key in params] # Record usage of _separate_replies which will help distinguish client calls # for loading the sidebar annotations from other API calls. if separate_replies: params.append((""es__separate_replies"", separate_replies)) newrelic.agent.add_custom_parameters(params) " 43564,"def AmplitudeEmbedding(features, wires, pad=False, normalize=False): r""""""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits. If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To enable this, the argument ``pad`` should be set to ``True``. The L2-norm of ``features`` must be one. By default, AmplitudeEmbedding expects a normalized feature vector. The argument ``normalize`` can be set to ``True`` to automatically normalize it. .. note:: AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with devices that implement this function. Args: features (array): input array of shape ``(2**n,)`` wires (Sequence[int]): sequence of qubit indices that the template acts on pad (Boolean): controls the activation of the padding option normalize (Boolean): controls the activation of automatic normalization Raises: ValueError: if `features` or `wires` is invalid """""" if isinstance(wires, int): wires = [wires] features = np.array(features) n_features = len(features) n_amplitudes = 2**len(wires) if n_amplitudes < n_features: raise ValueError(""AmplitudeEmbedding requires the size of feature vector to be "" ""smaller than or equal to 2**len(wires), which is {}; "" ""got {}."".format(n_amplitudes, n_features)) if pad and n_amplitudes >= n_features: features = np.pad(features, (0, n_amplitudes-n_features), 'constant') if not pad and n_amplitudes != n_features: raise ValueError(""AmplitudeEmbedding must get a feature vector of size 2**len(wires), "" ""which is {}; got {}. 
Use ``pad=True`` to automatically pad the "" ""features with zeros."".format(n_amplitudes, n_features)) # Get normalization norm = 0 for f in features: if isinstance(f, Variable): norm += np.conj(f.val)*f.val else: norm += np.conj(f)*f if not np.isclose(norm, 1): if normalize: features = features/np.sqrt(norm) else: raise ValueError(""AmplitudeEmbedding requires a normalized feature vector. "" ""Set ``normalize=True`` to automatically normalize it."") QubitStateVector(features, wires=wires) ","def AmplitudeEmbedding(features, wires, pad=False, normalize=False): r""""""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits. If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To enable this, the argument ``pad`` should be set to ``True``. The L2-norm of ``features`` must be one. By default, AmplitudeEmbedding expects a normalized feature vector. The argument ``normalize`` can be set to ``True`` to automatically normalize it. .. note:: AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with devices that implement this function. Args: features (array): input array of shape ``(2**n,)`` wires (Sequence[int]): sequence of qubit indices that the template acts on pad (Boolean): controls the activation of the padding option normalize (Boolean): controls the activation of automatic normalization Raises: ValueError: if ``features`` or ``wires`` is invalid """""" if isinstance(wires, int): wires = [wires] features = np.array(features) n_features = len(features) n_amplitudes = 2**len(wires) if n_amplitudes < n_features: raise ValueError(""AmplitudeEmbedding requires the size of feature vector to be "" ""smaller than or equal to 2**len(wires), which is {}; "" ""got {}."".format(n_amplitudes, n_features)) if pad and n_amplitudes >= n_features: features = np.pad(features, (0, n_amplitudes-n_features), 'constant') if not pad and n_amplitudes != n_features: raise ValueError(""AmplitudeEmbedding must get a feature vector of size 2**len(wires), "" ""which is {}; got {}. Use ``pad=True`` to automatically pad the "" ""features with zeros."".format(n_amplitudes, n_features)) # Get normalization norm = 0 for f in features: if isinstance(f, Variable): norm += np.conj(f.val)*f.val else: norm += np.conj(f)*f if not np.isclose(norm, 1): if normalize: features = features/np.sqrt(norm) else: raise ValueError(""AmplitudeEmbedding requires a normalized feature vector. "" ""Set ``normalize=True`` to automatically normalize it."") QubitStateVector(features, wires=wires) " 49148,"def update_build_options(key_value_dict): """""" Update all build options in the key_value_dict with the value given in that dictionary, by calling update_build_option(key, value) repeatedly. This function can be used e.g. when EasyConfig-specific build options are passed in an EasyStack file. See https://github.com/easybuilders/easybuild-framework/issues/3513#issuecomment-986990195 """""" orig_key_value_dict = {} for key, value in key_value_dict.items(): orig_key_value_dict[key] = update_build_option(key, value) # Return original key-value pairs in a dictionary. # This way, they can later be restored by a single call to update_build_options(orig_key_value_dict) return orig_key_value_dict ","def update_build_options(key_value_dict): """""" Update build options as specified by the given dictionary (where keys are assumed to be build option names). 
Returns dictionary with original values for the updated build options. """""" orig_key_value_dict = {} for key, value in key_value_dict.items(): orig_key_value_dict[key] = update_build_option(key, value) # Return original key-value pairs in a dictionary. # This way, they can later be restored by a single call to update_build_options(orig_key_value_dict) return orig_key_value_dict " 30531,"def get_indicator_type(indicator_type, item): """"""Returns the indicator type in Demisto Args: indicator_type (str): ip, url, domain or hash item (dict): the indicator row from the csv response Returns: str. The indicator type per the indicators defined in Demisto """""" if indicator_type == 'ip': return ip_to_indicator_type(item.get('Name')) elif indicator_type == 'hash': return FeedIndicatorType.File elif indicator_type == 'domain': # If * is in the domain it is of type domain globe if '*' in item.get('Name'): return FeedIndicatorType.DomainGlob return FeedIndicatorType.Domain elif indicator_type == 'url': return FeedIndicatorType.URL ","def get_indicator_type(indicator_type, item): """"""Returns the indicator type in Demisto Args: indicator_type (str): ip, url, domain or hash item (dict): the indicator row from the csv response Returns: str. The indicator type per the indicators defined in Demisto """""" if indicator_type == 'ip': return ip_to_indicator_type(item.get('Name')) elif indicator_type == 'hash': return FeedIndicatorType.File elif indicator_type == 'domain': # If * is in the domain it is of type DomainGlob if '*' in item.get('Name'): return FeedIndicatorType.DomainGlob return FeedIndicatorType.Domain elif indicator_type == 'url': return FeedIndicatorType.URL " 55504,"def create_df_from_partitions(partitions, axis): """""" Create DataFrame from remote partitions. Parameters ---------- partitions : list List of Ray.ObjectRef/Dask.Future referencing to partitions in depend of the engine used. Or list containing tuples of Ray.ObjectRef/Dask.Future referencing to ip addresses of partitions and partitions itself in depend of the engine used. axis : None, 0 or 1 The `axis` parameter is used to identify what are the partitions passed. You have to set: - `axis` to 0 if you want to create DataFrame from row partitions. - `axis` to 1 if you want to create DataFrame from column partitions. - `axis` to None if you want to create DataFrame from 2D list of partitions. Returns ------- DataFrame DataFrame instance created from remote partitions. 
"""""" from modin.data_management.factories.dispatcher import EngineDispatcher factory = EngineDispatcher.get_engine() partition_class = factory.io_cls.frame_cls._frame_mgr_cls._partition_class partition_frame_class = factory.io_cls.frame_cls partition_mgr_class = factory.io_cls.frame_cls._frame_mgr_cls # When collecting partitions to NumPy array they will be kept row-wise if axis is None: if isinstance(partitions[0][0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [ [partition_class(partition, ip=ip) for ip, partition in row] for row in partitions ] ) else: parts = np.array( [ [partition_class(partition) for partition in row] for row in partitions ] ) # When collecting partitions to NumPy array they will be kept row-wise elif axis == 0: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip)] for ip, partition in partitions] ) else: parts = np.array([[partition_class(partition)] for partition in partitions]) # When collecting partitions to NumPy array they will be kept column-wise elif axis == 1: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip) for ip, partition in partitions]] ) else: parts = np.array([[partition_class(partition) for partition in partitions]]) else: raise ValueError( f""Got unacceptable value of axis {axis}. Possible values are {0}, {1} or {None}."" ) index = partition_mgr_class.get_indices(0, parts, lambda df: df.axes[0]) columns = partition_mgr_class.get_indices(1, parts, lambda df: df.axes[1]) return DataFrame( query_compiler=PandasQueryCompiler(partition_frame_class(parts, index, columns)) ) ","def create_df_from_partitions(partitions, axis): """""" Create DataFrame from remote partitions. Parameters ---------- partitions : list List of Ray.ObjectRef/Dask.Future referencing to partitions in depend of the engine used. Or list containing tuples of Ray.ObjectRef/Dask.Future referencing to ip addresses of partitions and partitions itself in depend of the engine used. axis : None, 0 or 1 The `axis` parameter is used to identify what are the partitions passed. You have to set: - `axis` to 0 if you want to create DataFrame from row partitions. - `axis` to 1 if you want to create DataFrame from column partitions. - `axis` to None if you want to create DataFrame from 2D list of partitions. Returns ------- DataFrame DataFrame instance created from remote partitions. 
"""""" from modin.data_management.factories.dispatcher import EngineDispatcher factory = EngineDispatcher.get_engine() partition_class = factory.io_cls.frame_cls._frame_mgr_cls._partition_class partition_frame_class = factory.io_cls.frame_cls partition_mgr_class = factory.io_cls.frame_cls._frame_mgr_cls # When collecting partitions to NumPy array they will be kept row-wise if axis is None: if isinstance(partitions[0][0], tuple): if not EnablePartitionIPs.get(): raise ValueError( ""Passed `partitions` with IPs but paritions API was not enabled."" ) parts = np.array( [ [partition_class(partition, ip=ip) for ip, partition in row] for row in partitions ] ) else: parts = np.array( [ [partition_class(partition) for partition in row] for row in partitions ] ) # When collecting partitions to NumPy array they will be kept row-wise elif axis == 0: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip)] for ip, partition in partitions] ) else: parts = np.array([[partition_class(partition)] for partition in partitions]) # When collecting partitions to NumPy array they will be kept column-wise elif axis == 1: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip) for ip, partition in partitions]] ) else: parts = np.array([[partition_class(partition) for partition in partitions]]) else: raise ValueError( f""Got unacceptable value of axis {axis}. Possible values are {0}, {1} or {None}."" ) index = partition_mgr_class.get_indices(0, parts, lambda df: df.axes[0]) columns = partition_mgr_class.get_indices(1, parts, lambda df: df.axes[1]) return DataFrame( query_compiler=PandasQueryCompiler(partition_frame_class(parts, index, columns)) ) " 40603,"def simplify_links(n, costs, config, output, aggregation_strategies=dict()): ## Complex multi-node links are folded into end-points logger.info(""Simplifying connected link components"") if n.links.empty: return n, n.buses.index.to_series() # Determine connected link components, ignore all links but DC adjacency_matrix = n.adjacency_matrix(branch_components=['Link'], weights=dict(Link=(n.links.carrier == 'DC').astype(float))) _, labels = connected_components(adjacency_matrix, directed=False) labels = pd.Series(labels, n.buses.index) G = n.graph() def split_links(nodes): nodes = frozenset(nodes) seen = set() supernodes = {m for m in nodes if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)} for u in supernodes: for m, ls in G.adj[u].items(): if m not in nodes or m in seen: continue buses = [u, m] links = [list(ls)] #[name for name in ls]] while m not in (supernodes | seen): seen.add(m) for m2, ls in G.adj[m].items(): if m2 in seen or m2 == u: continue buses.append(m2) links.append(list(ls)) # [name for name in ls]) break else: # stub break m = m2 if m != u: yield pd.Index((u, m)), buses, links seen.add(u) busmap = n.buses.index.to_series() connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config) connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link)) for lbl in labels.value_counts().loc[lambda s: s > 2].index: for b, buses, links in split_links(labels.index[labels == lbl]): if len(buses) <= 2: continue logger.debug('nodes = 
{}'.format(labels.index[labels == lbl])) logger.debug('b = {}\nbuses = {}\nlinks = {}'.format(b, buses, links)) m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']], n.buses.loc[buses[1:-1], ['x', 'y']]) busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]] connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link, buses) all_links = [i for _, i in sum(links, [])] p_max_pu = config['links'].get('p_max_pu', 1.) lengths = n.links.loc[all_links, 'length'] name = lengths.idxmax() + '+{}'.format(len(links) - 1) params = dict( carrier='DC', bus0=b[0], bus1=b[1], length=sum(n.links.loc[[i for _, i in l], 'length'].mean() for l in links), p_nom=min(n.links.loc[[i for _, i in l], 'p_nom'].sum() for l in links), underwater_fraction=sum(lengths/lengths.sum() * n.links.loc[all_links, 'underwater_fraction']), p_max_pu=p_max_pu, p_min_pu=-p_max_pu, underground=False, under_construction=False ) logger.info(""Joining the links {} connecting the buses {} to simple link {}"".format("", "".join(all_links), "", "".join(buses), name)) n.mremove(""Link"", all_links) static_attrs = n.components[""Link""][""attrs""].loc[lambda df: df.static] for attr, default in static_attrs.default.iteritems(): params.setdefault(attr, default) n.links.loc[name] = pd.Series(params) # n.add(""Link"", **params) logger.debug(""Collecting all components using the busmap"") exclude_carriers=config[""clustering""][""exclude_carriers""] _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, aggregation_strategies=aggregation_strategies, exclude_carriers=exclude_carriers) return n, busmap ","def simplify_links(n, costs, config, output, aggregation_strategies=dict()): ## Complex multi-node links are folded into end-points logger.info(""Simplifying connected link components"") if n.links.empty: return n, n.buses.index.to_series() # Determine connected link components, ignore all links but DC adjacency_matrix = n.adjacency_matrix(branch_components=['Link'], weights=dict(Link=(n.links.carrier == 'DC').astype(float))) _, labels = connected_components(adjacency_matrix, directed=False) labels = pd.Series(labels, n.buses.index) G = n.graph() def split_links(nodes): nodes = frozenset(nodes) seen = set() supernodes = {m for m in nodes if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)} for u in supernodes: for m, ls in G.adj[u].items(): if m not in nodes or m in seen: continue buses = [u, m] links = [list(ls)] #[name for name in ls]] while m not in (supernodes | seen): seen.add(m) for m2, ls in G.adj[m].items(): if m2 in seen or m2 == u: continue buses.append(m2) links.append(list(ls)) # [name for name in ls]) break else: # stub break m = m2 if m != u: yield pd.Index((u, m)), buses, links seen.add(u) busmap = n.buses.index.to_series() connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config) connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link)) for lbl in labels.value_counts().loc[lambda s: s > 2].index: for b, buses, links in split_links(labels.index[labels == lbl]): if len(buses) <= 2: continue logger.debug('nodes = {}'.format(labels.index[labels == lbl])) logger.debug('b = {}\nbuses = {}\nlinks = {}'.format(b, buses, links)) m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']], n.buses.loc[buses[1:-1], ['x', 'y']]) busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]] connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, costs, config, 
connection_costs_per_link, buses) all_links = [i for _, i in sum(links, [])] p_max_pu = config['links'].get('p_max_pu', 1.) lengths = n.links.loc[all_links, 'length'] name = lengths.idxmax() + '+{}'.format(len(links) - 1) params = dict( carrier='DC', bus0=b[0], bus1=b[1], length=sum(n.links.loc[[i for _, i in l], 'length'].mean() for l in links), p_nom=min(n.links.loc[[i for _, i in l], 'p_nom'].sum() for l in links), underwater_fraction=sum(lengths/lengths.sum() * n.links.loc[all_links, 'underwater_fraction']), p_max_pu=p_max_pu, p_min_pu=-p_max_pu, underground=False, under_construction=False ) logger.info(""Joining the links {} connecting the buses {} to simple link {}"".format("", "".join(all_links), "", "".join(buses), name)) n.mremove(""Link"", all_links) static_attrs = n.components[""Link""][""attrs""].loc[lambda df: df.static] for attr, default in static_attrs.default.iteritems(): params.setdefault(attr, default) n.links.loc[name] = pd.Series(params) # n.add(""Link"", **params) logger.debug(""Collecting all components using the busmap"") exclude_carriers = config[""clustering""].get(""exclude_carriers"", []) _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, aggregation_strategies=aggregation_strategies, exclude_carriers=exclude_carriers) return n, busmap " 48132,"def test_algorithms_table(): def algorithms_generate_table(templates): attributes = [""model_template_id"", ""name"", ""gigaflops"", ""size""] header = attributes + [""Path""] attributes_in_md = {""name"": ""Name"", ""model_template_id"": ""ID"", ""gigaflops"": ""GFlops"", ""size"": ""Size"", ""Path"": ""Path""} table = ["" | "".join([attributes_in_md[x] for x in header])] + ["" | "".join([""-------"" for _ in header])] for template in sorted(templates, key=lambda x: float(x.gigaflops)): record = [str(getattr(template, attr)) for attr in attributes ] record.append(os.path.relpath(template.model_template_path, './external')) record = "" | "".join(record) table += [record] return ""\n"".join(table) with open(""external/README.md"", encoding=""UTF-8"") as read_file: full_text = '' for line in read_file: full_text += line registry = Registry(""."") templates_per_task_type = defaultdict(list) for template in sorted(registry.templates, key=lambda x:str(x.task_type)): templates_per_task_type[template.task_type].append(template) for task_type, templates in templates_per_task_type.items(): generated_table = algorithms_generate_table(templates) print(""\n"", task_type) print(generated_table) assert generated_table in full_text, f""\n{task_type}\n{generated_table}"" ","def test_algorithms_table(): def algorithms_generate_table(templates): attributes = [""model_template_id"", ""name"", ""gigaflops"", ""size""] header = attributes + [""Path""] attributes_in_md = {""name"": ""Name"", ""model_template_id"": ""ID"", ""gigaflops"": ""GFlops"", ""size"": ""Size"", ""Path"": ""Path""} table = ["" | "".join([attributes_in_md[x] for x in header])] + ["" | "".join([""-------"" for _ in header])] for template in sorted(templates, key=lambda x: float(x.gigaflops)): record = [str(getattr(template, attr)) for attr in attributes ] record.append(os.path.relpath(template.model_template_path, './external')) record = "" | "".join(record) table += [record] return ""\n"".join(table) with open(""external/README.md"", encoding=""UTF-8"") as read_file: full_text = '' for line in read_file: full_text += line registry = Registry(""."") templates_per_task_type = defaultdict(list) for template in sorted(registry.templates, key=lambda 
x:str(x.task_type)): templates_per_task_type[template.task_type].append(template) for task_type, templates in templates_per_task_type.items(): generated_table = algorithms_generate_table(templates) print(""\n"", task_type) print(generated_table) assert generated_table in full_text, f""\n{generated_table} not in \n{full_text}\n for the task {task_type}\n"" " 8750,"def rule(*patterns): """"""Decorate a function to be called when a line matches the given pattern Each argument is a regular expression which will trigger the function. This decorator can be used multiple times to add more rules. If the Sopel instance is in a channel, or sent a PRIVMSG, where a string matching this expression is said, the function will execute. Note that captured groups here will be retrievable through the Trigger object later. Inside the regular expression, some special directives can be used. $nick will be replaced with the nick of the bot and , or :, and $nickname will be replaced with the nick of the bot. .. versionchanged:: 7.0 The :func:`rule` decorator can be called with many positional arguments, each used to add a rule. This is equivalent as decorating the same function many times with this decorator. """""" def add_attribute(function): if not hasattr(function, ""rule""): function.rule = [] for value in patterns: if value not in function.rule: function.rule.append(value) return function return add_attribute ","def rule(*patterns): """"""Decorate a function to be called when a line matches the given pattern Each argument is a regular expression which will trigger the function. This decorator can be used multiple times to add more rules. If the Sopel instance is in a channel, or sent a PRIVMSG, where a string matching this expression is said, the function will execute. Note that captured groups here will be retrievable through the Trigger object later. Inside the regular expression, some special directives can be used. $nick will be replaced with the nick of the bot and , or :, and $nickname will be replaced with the nick of the bot. .. versionchanged:: 7.0 The :func:`rule` decorator can be called with multiple positional arguments, each used to add a rule. This is equivalent as decorating the same function many times with this decorator. """""" def add_attribute(function): if not hasattr(function, ""rule""): function.rule = [] for value in patterns: if value not in function.rule: function.rule.append(value) return function return add_attribute " 24965,"def get_numversion_from_version(v: str) -> tuple[int, ...]: """"""Kept for compatibility reason. See https://github.com/PyCQA/pylint/issues/4399 https://github.com/PyCQA/pylint/issues/4420, """""" v = v.replace(""pylint-"", """") version = [] for n in v.split(""."")[0:3]: try: version.append(int(n)) except ValueError: num = """" for c in n: if c.isdigit(): num += c else: break try: version.append(int(num)) except ValueError: version.append(0) while len(version) != 3: version.append(0) return tuple(version) ","def get_numversion_from_version(v: str) -> tuple[int, int, int]: """"""Kept for compatibility reason. 
See https://github.com/PyCQA/pylint/issues/4399 https://github.com/PyCQA/pylint/issues/4420, """""" v = v.replace(""pylint-"", """") version = [] for n in v.split(""."")[0:3]: try: version.append(int(n)) except ValueError: num = """" for c in n: if c.isdigit(): num += c else: break try: version.append(int(num)) except ValueError: version.append(0) while len(version) != 3: version.append(0) return tuple(version) " 8375,"def _compute_single_fwhm(flux, spectral_axis): argmax = np.argmax(flux) halfval = flux[argmax] / 2 left = flux[:argmax] <= halfval right = flux[argmax+1:] <= halfval # Highest signal at the first point if sum(left) == 0: l_idx = 0 else: l_idx = np.where(left == True)[0][-1] # Highest signal at the last point if sum(right) == 0: r_idx = len(flux)-1 else: r_idx = np.where(right == True)[0][0] + argmax return spectral_axis[r_idx] - spectral_axis[l_idx] ","def _compute_single_fwhm(flux, spectral_axis): argmax = np.argmax(flux) halfval = flux[argmax] / 2 left = flux[:argmax] <= halfval right = flux[argmax+1:] <= halfval # Highest signal at the first point if np.sum(left) == 0: l_idx = 0 else: l_idx = np.where(left == True)[0][-1] # Highest signal at the last point if sum(right) == 0: r_idx = len(flux)-1 else: r_idx = np.where(right == True)[0][0] + argmax return spectral_axis[r_idx] - spectral_axis[l_idx] " 35469,"def get_safe_obstacle_distance(v_ego): return (v_ego*v_ego) / (2 * COMFORT_BRAKE) + T_FOLLOW * v_ego + STOP_DISTANCE ","def get_safe_obstacle_distance(v_ego): return v_ego**2 / (2 * COMFORT_BRAKE) + T_FOLLOW * v_ego + STOP_DISTANCE " 24641,"def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10): r"""""" Returns an array of null point object, representing the null points of the given vector space. Parameters ---------- vspace: array_like The vector space as constructed by the vector_space function which is A 1 by 3 array with the first element containing the coordinates, the second element containing the vector values, and the third element containing the delta values for each dimension. MAX_ITERATIONS: int The maximum iterations of the Newton-Raphson method. The default value is 500. err: float The threshold/error that determines if convergence has occured using the Newton-Raphson method. The default value is ``1e-10``. Returns ------- array_like of `~plasmapy.analysis.nullpoint.NullPoint` An array of NullPoint objects representing the nullpoints of the given vector space. """""" nullpoints = [] for i in range(len(vspace[0][0]) - 1): for j in range(len(vspace[0][0][0]) - 1): for k in range(len(vspace[0][0][0][0]) - 1): if _reduction(vspace, [i, j, k]): if _trilinear_analysis(vspace, [i, j, k]): loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err) if loc is not None: p = NullPoint(loc, ""N/A"") if p not in nullpoints: nullpoints.append(p) return nullpoints ","def _vspace_iterator(vspace, maxiter=500, err=1e-10): r"""""" Returns an array of null point object, representing the null points of the given vector space. Parameters ---------- vspace: array_like The vector space as constructed by the vector_space function which is A 1 by 3 array with the first element containing the coordinates, the second element containing the vector values, and the third element containing the delta values for each dimension. MAX_ITERATIONS: int The maximum iterations of the Newton-Raphson method. The default value is 500. err: float The threshold/error that determines if convergence has occured using the Newton-Raphson method. The default value is ``1e-10``. 
Returns ------- array_like of `~plasmapy.analysis.nullpoint.NullPoint` An array of NullPoint objects representing the nullpoints of the given vector space. """""" nullpoints = [] for i in range(len(vspace[0][0]) - 1): for j in range(len(vspace[0][0][0]) - 1): for k in range(len(vspace[0][0][0][0]) - 1): if _reduction(vspace, [i, j, k]): if _trilinear_analysis(vspace, [i, j, k]): loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err) if loc is not None: p = NullPoint(loc, ""N/A"") if p not in nullpoints: nullpoints.append(p) return nullpoints " 29766,"def insert_playlists(troi_patch_slug, import_file): connection = db.engine.raw_connection() query = """""" INSERT INTO statistics.year_in_music(user_id, data) SELECT ""user"".id , jsonb_build_object('playlists', playlists::jsonb) FROM (VALUES %s) AS t(user_name, playlists) JOIN ""user"" ON ""user"".musicbrainz_id = user_name ON CONFLICT (user_id) DO UPDATE SET data = statistics.year_in_music.data || EXCLUDED.data """""" print(""start playlist import"") data = [] with open(import_file, ""r"") as f: while True: user_name = f.readline() if user_name == """": break user_name = user_name.strip() playlist_mbid = f.readline().strip() jspf = f.readline().strip() data.append((user_name, ujson.dumps({troi_patch_slug: {""mbid"": playlist_mbid, ""jspf"": jspf}}))) for user_name, js in data: print(""%s %s"" % (user_name, js[:20])) try: with connection.cursor() as cursor: execute_values(cursor, query, data) connection.commit() print(""playlists imported."") except psycopg2.errors.OperationalError: connection.rollback() current_app.logger.error(""Error while inserting playlist/%s:"" % playlist_slug, exc_info=True) print(""playlists import failed."") ","def insert_playlists(troi_patch_slug, import_file): connection = db.engine.raw_connection() query = """""" INSERT INTO statistics.year_in_music(user_id, data) SELECT ""user"".id , jsonb_build_object('playlists', playlists::jsonb) FROM (VALUES %s) AS t(user_name, playlists) JOIN ""user"" ON ""user"".musicbrainz_id = user_name ON CONFLICT (user_id) DO UPDATE SET data = statistics.year_in_music.data || EXCLUDED.data """""" print(""start playlist import"") data = [] with open(import_file, ""r"") as f: while True: user_name = f.readline() if user_name == """": break user_name = user_name.strip() playlist_mbid = f.readline().strip() jspf = f.readline().strip() data.append((user_name, ujson.dumps({troi_patch_slug: {""mbid"": playlist_mbid, ""jspf"": ujson.loads(jspf)}}))) for user_name, js in data: print(""%s %s"" % (user_name, js[:20])) try: with connection.cursor() as cursor: execute_values(cursor, query, data) connection.commit() print(""playlists imported."") except psycopg2.errors.OperationalError: connection.rollback() current_app.logger.error(""Error while inserting playlist/%s:"" % playlist_slug, exc_info=True) print(""playlists import failed."") " 34043,"def format_object_summary_output(state_data: Dict): if len(state_data) == 0: return ""No resource in the cluster"" # Parse the data. cluster_data = state_data[""cluster""] summaries = cluster_data[""summary""] summary_by = cluster_data[""summary_by""] del cluster_data[""summary_by""] del cluster_data[""summary""] cluster_info_table = yaml.dump(cluster_data, indent=2) # Create a table per callsite. tables = [] for callsite, summary in summaries.items(): # Convert dict to yaml for better formatting. 
for key, val in summary.items(): if isinstance(val, dict): summary[key] = yaml.dump(val, indent=2) table = [] headers = sorted([key.upper() for key in summary.keys()]) table.append([summary[header.lower()] for header in headers]) table_for_callsite = tabulate( table, headers=headers, showindex=True, numalign=""left"" ) # Format callsite. formatted_callsite = callsite.replace(""|"", ""\n|"") tables.append(f""{formatted_callsite}\n{table_for_callsite}"") time = datetime.now() header = ""="" * 8 + f"" Object Summary: {time} "" + ""="" * 8 table_string = ""\n\n\n"".join(tables) return f"""""" {header} Stats: ------------------------------------ {cluster_info_table} Table (group by {summary_by}) ------------------------------------ {table_string} """""" ","def format_object_summary_output(state_data: Dict) -> str: if len(state_data) == 0: return ""No resource in the cluster"" # Parse the data. cluster_data = state_data[""cluster""] summaries = cluster_data[""summary""] summary_by = cluster_data[""summary_by""] del cluster_data[""summary_by""] del cluster_data[""summary""] cluster_info_table = yaml.dump(cluster_data, indent=2) # Create a table per callsite. tables = [] for callsite, summary in summaries.items(): # Convert dict to yaml for better formatting. for key, val in summary.items(): if isinstance(val, dict): summary[key] = yaml.dump(val, indent=2) table = [] headers = sorted([key.upper() for key in summary.keys()]) table.append([summary[header.lower()] for header in headers]) table_for_callsite = tabulate( table, headers=headers, showindex=True, numalign=""left"" ) # Format callsite. formatted_callsite = callsite.replace(""|"", ""\n|"") tables.append(f""{formatted_callsite}\n{table_for_callsite}"") time = datetime.now() header = ""="" * 8 + f"" Object Summary: {time} "" + ""="" * 8 table_string = ""\n\n\n"".join(tables) return f"""""" {header} Stats: ------------------------------------ {cluster_info_table} Table (group by {summary_by}) ------------------------------------ {table_string} """""" " 11984,"def test_imshow_rgb(): # tests that the alpha of a RGB array passed to imshow is set to 0 # instead of masked z = np.ones((100, 100, 3)) * 0.5 plt_crs = ccrs.LambertAzimuthalEqualArea() latlon_crs = ccrs.PlateCarree() ax = plt.axes(projection=plt_crs) ax.set_extent([-30, -20, 60, 70], crs=latlon_crs) img = ax.imshow(z, extent=[-26, -24, 64, 66], transform=latlon_crs) assert sum(img.get_array().data[:, 0, 3]) == 0 ","def test_imshow_rgb(): # tests that the alpha of a RGB array passed to imshow is set to 0 # instead of masked z = np.full((100, 100, 3), 0.5) plt_crs = ccrs.LambertAzimuthalEqualArea() latlon_crs = ccrs.PlateCarree() ax = plt.axes(projection=plt_crs) ax.set_extent([-30, -20, 60, 70], crs=latlon_crs) img = ax.imshow(z, extent=[-26, -24, 64, 66], transform=latlon_crs) assert sum(img.get_array().data[:, 0, 3]) == 0 " 45724,"def reprojection(R_src, R_dst): """"""Reprojects precipitation fields to the domain of another precipiation field. Parameters ---------- R_src: xarray Three-dimensional xarray with dimensions (t, x, y) containing a time series of precipitation fields. These precipitaiton fields will be reprojected. R_dst: xarray Xarray containing a precipitation field or a time series of precipitation fields. The xarray R_src will be reprojected to the domain of R_dst. Returns ------- R_rprj: xarray Three-dimensional xarray with dimensions (t, x, y) containing the precipitation fields of R_src, but reprojected to the domain of R_dst. 
"""""" # Extract the grid info from R_src src_crs = R_src.attrs[""projection""] x1_src = R_src.x.attrs[""x1""] y2_src = R_src.y.attrs[""y2""] xpixelsize_src = R_src.attrs[""xpixelsize""] ypixelsize_src = R_src.attrs[""ypixelsize""] src_transform = A.translation(float(x1_src), float(y2_src)) * A.scale( float(xpixelsize_src), float(-ypixelsize_src) ) # Extract the grid info from R_dst dst_crs = R_dst.attrs[""projection""] x1_dst = R_dst.x.attrs[""x1""] y2_dst = R_dst.y.attrs[""y2""] xpixelsize_dst = R_dst.attrs[""xpixelsize""] ypixelsize_dst = R_dst.attrs[""ypixelsize""] dst_transform = A.translation(float(x1_dst), float(y2_dst)) * A.scale( float(xpixelsize_dst), float(-ypixelsize_dst) ) # Initialise the reprojected (x)array R_rprj = np.zeros((R_src.shape[0], R_dst.shape[-2], R_dst.shape[-1])) # For every timestep, reproject the precipitation field of R_src to # the domain of R_dst if R_src.attrs[""yorigin""] != R_dst.attrs[""yorigin""]: R_src = R_src[:, ::-1, :] for i in range(R_src.shape[0]): reproject( R_src.values[i, :, :], R_rprj[i, :, :], src_transform=src_transform, src_crs=src_crs, dst_transform=dst_transform, dst_crs=dst_crs, resampling=Resampling.nearest, dst_nodata=np.nan, ) # Assign the necessary attributes from R_src and R_dst to R_rprj R_rprj = xr.DataArray( data=R_rprj, dims=(""t"", ""y"", ""x""), coords=dict( t=(""t"", R_src.coords[""t""].data), x=(""x"", R_dst.coords[""x""].data), y=(""y"", R_dst.coords[""y""].data), ), ) R_rprj.attrs.update(R_src.attrs) R_rprj.x.attrs.update(R_dst.x.attrs) R_rprj.y.attrs.update(R_dst.y.attrs) for key in [""projection"", ""yorigin"", ""xpixelsize"", ""ypixelsize""]: R_rprj.attrs[key] = R_dst.attrs[key] return R_rprj ","def reprojection(R_src, R_dst): """"""Reprojects precipitation fields to the domain of another precipiation field. Parameters ---------- R_src: xarray Three-dimensional xarray with dimensions (t, x, y) containing a time series of precipitation fields. These precipitaiton fields will be reprojected. R_dst: xarray Xarray containing a precipitation field or a time series of precipitation fields. The xarray R_src will be reprojected to the domain of R_dst. Returns ------- R_rprj: xarray Three-dimensional xarray with dimensions (t, x, y) containing the precipitation fields of R_src, but reprojected to the domain of R_dst. 
"""""" # Extract the grid info from R_src src_crs = R_src.attrs[""projection""] x1_src = R_src.x.attrs[""x1""] y2_src = R_src.y.attrs[""y2""] xpixelsize_src = R_src.attrs[""xpixelsize""] ypixelsize_src = R_src.attrs[""ypixelsize""] src_transform = A.translation(float(x1_src), float(y2_src)) * A.scale( float(xpixelsize_src), float(-ypixelsize_src) ) # Extract the grid info from R_dst dst_crs = R_dst.attrs[""projection""] x1_dst = R_dst.x.attrs[""x1""] y2_dst = R_dst.y.attrs[""y2""] xpixelsize_dst = R_dst.attrs[""xpixelsize""] ypixelsize_dst = R_dst.attrs[""ypixelsize""] dst_transform = A.translation(float(x1_dst), float(y2_dst)) * A.scale( float(xpixelsize_dst), float(-ypixelsize_dst) ) # Initialise the reprojected (x)array R_rprj = np.zeros((R_src.shape[0], R_dst.shape[-2], R_dst.shape[-1])) # For every timestep, reproject the precipitation field of R_src to # the domain of R_dst if R_src.attrs[""yorigin""] != R_dst.attrs[""yorigin""]: R_src = R_src[:, ::-1, :] for array_slice in R_src.transpose(""t"", ...): reproject( R_src.values[i, :, :], R_rprj[i, :, :], src_transform=src_transform, src_crs=src_crs, dst_transform=dst_transform, dst_crs=dst_crs, resampling=Resampling.nearest, dst_nodata=np.nan, ) # Assign the necessary attributes from R_src and R_dst to R_rprj R_rprj = xr.DataArray( data=R_rprj, dims=(""t"", ""y"", ""x""), coords=dict( t=(""t"", R_src.coords[""t""].data), x=(""x"", R_dst.coords[""x""].data), y=(""y"", R_dst.coords[""y""].data), ), ) R_rprj.attrs.update(R_src.attrs) R_rprj.x.attrs.update(R_dst.x.attrs) R_rprj.y.attrs.update(R_dst.y.attrs) for key in [""projection"", ""yorigin"", ""xpixelsize"", ""ypixelsize""]: R_rprj.attrs[key] = R_dst.attrs[key] return R_rprj " 17307,"def test_run_app_keepalive_timeout(patched_loop): new_timeout = 1234 base_runner_init_mock = mock.MagicMock() base_runner_init_orig = BaseRunner.__init__ def base_runner_init_spy(self, *args, **kwargs): base_runner_init_mock(*args, **kwargs) base_runner_init_orig(self, *args, **kwargs) with mock.patch.object(BaseRunner, ""__init__"", base_runner_init_spy): app = web.Application() web.run_app(app, keepalive_timeout=new_timeout, print=stopper(patched_loop)) base_runner_init_kwargs = base_runner_init_mock.call_args[1] assert (base_runner_init_kwargs[""keepalive_timeout""] == new_timeout) ","def test_run_app_keepalive_timeout(patched_loop): new_timeout = 1234 base_runner_init_mock = mock.MagicMock() base_runner_init_orig = BaseRunner.__init__ def base_runner_init_spy(self, *args, **kwargs): base_runner_init_mock(*args, **kwargs) base_runner_init_orig(self, *args, **kwargs) with mock.patch.object(BaseRunner, ""__init__"", base_runner_init_spy): app = web.Application() web.run_app(app, keepalive_timeout=new_timeout, print=stopper(patched_loop)) base_runner_init_kwargs = base_runner_init_mock.call_args[1] assert base_runner_init_kwargs[""keepalive_timeout""] == new_timeout " 30139,"def test_request_scope_is_none_when_no_asgi(): app = Sanic(""no_asgi"") @app.get(""/"") async def get(request): return response.empty() request, _ = app.test_client.get(""/"") assert request.scope == None ","def test_request_scope_is_none_when_no_asgi(): app = Sanic(""no_asgi"") @app.get(""/"") async def get(request): return response.empty() request, _ = app.test_client.get(""/"") assert request.scope is None " 23088,"def imread(filename, imread=None, preprocess=None): """"""Read a stack of images into a dask array Parameters ---------- filename: string or list of string A globstring like 'myfile.*.png' or list of filenames imread: 
function (optional) Optionally provide custom imread function. Function should expect a filename and produce a numpy array. Defaults to ``skimage.io.imread``. preprocess: function (optional) Optionally provide custom function to preprocess the image. Function should expect a numpy array for a single image. Examples -------- >>> from dask.array.image import imread >>> im = imread('2015-*-*.png') # doctest: +SKIP >>> im.shape # doctest: +SKIP (365, 1000, 1000, 3) Returns ------- Dask array of all images stacked along the first dimension. All images will be treated as individual chunks """""" imread = imread or sk_imread if isinstance(filename, str): filenames = sorted(glob(filename)) if not filenames: raise ValueError(""No files found under name %s"" % filename) elif isinstance(filename, str): filenames = filename else: IOError(""filename is neither a 'str' nor a 'list'"") name = ""imread-%s"" % tokenize(filenames, map(os.path.getmtime, filenames)) sample = imread(filenames[0]) if preprocess: sample = preprocess(sample) keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))] if preprocess: values = [ (add_leading_dimension, (preprocess, (imread, fn))) for fn in filenames ] else: values = [(add_leading_dimension, (imread, fn)) for fn in filenames] dsk = dict(zip(keys, values)) chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape) return Array(dsk, name, chunks, sample.dtype) ","def imread(filename, imread=None, preprocess=None): """"""Read a stack of images into a dask array Parameters ---------- filename: string or iterable of strings A globstring like 'myfile.*.png' or list of filenames imread: function (optional) Optionally provide custom imread function. Function should expect a filename and produce a numpy array. Defaults to ``skimage.io.imread``. preprocess: function (optional) Optionally provide custom function to preprocess the image. Function should expect a numpy array for a single image. Examples -------- >>> from dask.array.image import imread >>> im = imread('2015-*-*.png') # doctest: +SKIP >>> im.shape # doctest: +SKIP (365, 1000, 1000, 3) Returns ------- Dask array of all images stacked along the first dimension. All images will be treated as individual chunks """""" imread = imread or sk_imread if isinstance(filename, str): filenames = sorted(glob(filename)) if not filenames: raise ValueError(""No files found under name %s"" % filename) elif isinstance(filename, str): filenames = filename else: IOError(""filename is neither a 'str' nor a 'list'"") name = ""imread-%s"" % tokenize(filenames, map(os.path.getmtime, filenames)) sample = imread(filenames[0]) if preprocess: sample = preprocess(sample) keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))] if preprocess: values = [ (add_leading_dimension, (preprocess, (imread, fn))) for fn in filenames ] else: values = [(add_leading_dimension, (imread, fn)) for fn in filenames] dsk = dict(zip(keys, values)) chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape) return Array(dsk, name, chunks, sample.dtype) " 11700,"def parametrize(tests, arity=None): '''Helper for parametrizing pytest tests. Expect a list of lambdas, one per test. Each lambda must return the parameters for its respecting test. Test identifiers will be automatically generated, from the test number and its lambda definition line (1.10, 2.12, 3.20, ...). 
If arity is None, the arguments being parametrized will be automatically set from the function last arguments, according to the numbers of parameters for each test. ''' ids = [] argvalues = [] for n, t in enumerate(tests): line = inspect.getsourcelines(t)[1] ids.append('%u:%u' % (n+1, line)) argvalues.append(t()) if arity is None: arity = len(argvalues[0]) assert arity > 0 def decorator(fn): argnames = list( parameter.name for parameter in inspect.signature(fn).parameters.values() if parameter.default is inspect.Parameter.empty )[-arity:] if arity == 1: argnames = argnames[0] return pytest.mark.parametrize(argnames, argvalues, ids=ids)(fn) return decorator ","def parametrize(tests, arity=None): '''Helper for parametrizing pytest tests. Expect a list of lambdas, one per test. Each lambda must return the parameters for its respective test. Test identifiers will be automatically generated, from the test number and its lambda definition line (1.10, 2.12, 3.20, ...). If arity is None, the arguments being parametrized will be automatically set from the function last arguments, according to the numbers of parameters for each test. ''' ids = [] argvalues = [] for n, t in enumerate(tests): line = inspect.getsourcelines(t)[1] ids.append('%u:%u' % (n+1, line)) argvalues.append(t()) if arity is None: arity = len(argvalues[0]) assert arity > 0 def decorator(fn): argnames = list( parameter.name for parameter in inspect.signature(fn).parameters.values() if parameter.default is inspect.Parameter.empty )[-arity:] if arity == 1: argnames = argnames[0] return pytest.mark.parametrize(argnames, argvalues, ids=ids)(fn) return decorator " 20734,"def test_getProjectsFirstPage(api_client): # setup http_manager = MagicMock() api_client._http = http_manager pagination_manager = api_client._projects_pagination_mgr pagination_manager.limit = 20 finished_callback = MagicMock() failed_callback = MagicMock() # Call api_client.getProjectsFirstPage(on_finished = finished_callback, failed = failed_callback) # Asserts pagination_manager.reset.assert_called_once() # Should be called since we asked for new set of projects http_manager.get.assert_called_once() args = http_manager.get.call_args_list[0] # Ensure that it's called with the right limit assert args[0][0] == 'https://api.ultimaker.com/cura/v1/projects?limit=20' # Change the limit & try again http_manager.get.reset_mock() pagination_manager.limit = 80 api_client.getProjectsFirstPage(on_finished=finished_callback, failed=failed_callback) args = http_manager.get.call_args_list[0] # Ensure that it's called with the right limit assert args[0][0] == 'https://api.ultimaker.com/cura/v1/projects?limit=80' ","def test_getProjectsFirstPage(api_client): # setup http_manager = MagicMock() api_client._http = http_manager pagination_manager = api_client._projects_pagination_mgr pagination_manager.limit = 20 finished_callback = MagicMock() failed_callback = MagicMock() # Call api_client.getProjectsFirstPage(on_finished = finished_callback, failed = failed_callback) # Asserts pagination_manager.reset.assert_called_once() # Should be called since we asked for new set of projects http_manager.get.assert_called_once() args = http_manager.get.call_args_list[0] # Ensure that it's called with the right limit assert args[0][0] == ""https://api.ultimaker.com/cura/v1/projects?limit=20"" # Change the limit & try again http_manager.get.reset_mock() pagination_manager.limit = 80 api_client.getProjectsFirstPage(on_finished=finished_callback, failed=failed_callback) args = 
http_manager.get.call_args_list[0] # Ensure that it's called with the right limit assert args[0][0] == 'https://api.ultimaker.com/cura/v1/projects?limit=80' " 7211,"def interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut): """"""Find the new grayscale level for a region using bilinear interpolation. Parameters ---------- image : ndarray Full image. xslice, yslice : array-like Indices of the region. map* : ndarray Mappings of greylevels from histograms. lut : ndarray Maps grayscale levels in image to histogram levels. Returns ------- out : ndarray Original image with the subregion replaced. Notes ----- This function calculates the new greylevel assignments of pixels within a submatrix of the image. This is done by a bilinear interpolation between four different mappings in order to eliminate boundary artifacts. """""" warnings.warn(""interpolate is deprecated and will be removed in version "" ""0.19. Please use the rivate function _interpolate "" ""instead."", category=FutureWarning, stacklevel=2) xslice = slice(xslice[0], xslice[-1] + 1) yslice = slice(yslice[0], yslice[-1] + 1) return _interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut) ","def interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut): """"""Find the new grayscale level for a region using bilinear interpolation. Parameters ---------- image : ndarray Full image. xslice, yslice : array-like Indices of the region. map* : ndarray Mappings of greylevels from histograms. lut : ndarray Maps grayscale levels in image to histogram levels. Returns ------- out : ndarray Original image with the subregion replaced. Notes ----- This function calculates the new greylevel assignments of pixels within a submatrix of the image. This is done by a bilinear interpolation between four different mappings in order to eliminate boundary artifacts. """""" warnings.warn(""interpolate is deprecated and will be removed in version "" ""0.19. Please use the private function _interpolate "" ""instead."", category=FutureWarning, stacklevel=2) xslice = slice(xslice[0], xslice[-1] + 1) yslice = slice(yslice[0], yslice[-1] + 1) return _interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut) " 31625,"def validate_cn(default_base_dn, cn): changing_cn = cn i = 1 while check_if_user_exists_by_cn(default_base_dn, changing_cn): changing_cn = cn+str(i) i += 1 return changing_cn ","def generate_unique_cn(default_base_dn, cn): changing_cn = cn i = 1 while check_if_user_exists_by_cn(default_base_dn, changing_cn): changing_cn = cn+str(i) i += 1 return changing_cn " 7284,"def _supported_float_type(input_dtype, allow_complex=False): """"""Return an appropriate floating-point dtype for a given dtype. float32, float64, complex64, complex128 are preserved. float16 is promoted to float32. complex256 is demoted to complex128. Other types are cast to float64. Paramters --------- input_dtype : np.dtype or Iterable of np.dtype The input dtype. If a sequence of multiple dtypes is provided, each dtype is first converted to a supported floating point type and the final dtype is then determined by applying `np.result_type` on the sequence of supported floating point types. allow_complex : bool, optional If False, raise a ValueError on complex-valued inputs. Retruns ------- float_type : dtype Floating-point dtype for the image. 
"""""" if isinstance(input_dtype, Iterable) and not isinstance(input_dtype, str): return np.result_type(*(_supported_float_type(d) for d in input_dtype)) input_dtype = np.dtype(input_dtype) if not allow_complex and input_dtype.kind == 'c': raise ValueError(""complex valued input is not supported"") return new_float_type.get(input_dtype.char, np.float64) ","def _supported_float_type(input_dtype, allow_complex=False): """"""Return an appropriate floating-point dtype for a given dtype. float32, float64, complex64, complex128 are preserved. float16 is promoted to float32. complex256 is demoted to complex128. Other types are cast to float64. Paramters --------- input_dtype : np.dtype or Iterable of np.dtype The input dtype. If a sequence of multiple dtypes is provided, each dtype is first converted to a supported floating point type and the final dtype is then determined by applying `np.result_type` on the sequence of supported floating point types. allow_complex : bool, optional If False, raise a ValueError on complex-valued inputs. Returns ------- float_type : dtype Floating-point dtype for the image. """""" if isinstance(input_dtype, Iterable) and not isinstance(input_dtype, str): return np.result_type(*(_supported_float_type(d) for d in input_dtype)) input_dtype = np.dtype(input_dtype) if not allow_complex and input_dtype.kind == 'c': raise ValueError(""complex valued input is not supported"") return new_float_type.get(input_dtype.char, np.float64) " 25780,"def define_ramp_limit_constraints(n, sns, c='Generator', commitable=True): """""" Defines ramp limits for generators and links with valid ramplimit. """""" test_components = ['Generator', 'Link'] assert c in test_components, 'Ramp limit constraints were only tested for Generator and Link.' rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index if rup_i.empty & rdown_i.empty: return fix_i = get_non_extendable_i(n, c) ext_i = get_extendable_i(n, c) p = get_var(n, c, 'p').loc[sns[1:]] p_prev = get_var(n, c, 'p').shift(1).loc[sns[1:]] active = get_activity_mask(n, c, sns[1:]) # fix up gens_i = rup_i.intersection(fix_i) if not gens_i.empty: lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i])) rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom') kwargs = dict(spec='nonext.', mask=active[gens_i]) define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs) # ext up gens_i = rup_i.intersection(ext_i) if not gens_i.empty: limit_pu = n.df(c)['ramp_limit_up'][gens_i] p_nom = get_var(n, c, 'p_nom')[gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom)) kwargs = dict(spec='ext.', mask=active[gens_i]) define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs) # fix down gens_i = rdown_i.intersection(fix_i) if not gens_i.empty: lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i])) rhs = n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom') kwargs = dict(spec='nonext.', mask=active[gens_i]) define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs) # ext down gens_i = rdown_i.intersection(ext_i) if not gens_i.empty: limit_pu = n.df(c)['ramp_limit_down'][gens_i] p_nom = get_var(n, c, 'p_nom')[gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom)) kwargs = dict(spec='ext.', mask=active[gens_i]) define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs) if commitable: assert c=='Generator', 'Commitable contraints were only tested for Generator.' 
com_i = n.df(c).query('committable').index.difference(ext_i) # com up gens_i = rup_i.intersection(com_i) if not gens_i.empty: limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom') limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom') status = get_var(n, c, 'status').loc[sns[1:], gens_i] status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_start - limit_up, status_prev), (- limit_start, status)) kwargs = dict(spec='com.', mask=active[gens_i]) define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs) # com down gens_i = rdown_i.intersection(com_i) if not gens_i.empty: limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom') limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom') status = get_var(n, c, 'status').loc[sns[1:], gens_i] status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_down - limit_shut, status), (limit_shut, status_prev)) kwargs = dict(spec='com.', mask=active[gens_i]) define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs) ","def define_ramp_limit_constraints(n, sns, c='Generator', commitable=True): """""" Defines ramp limits for a given component with valid ramplimit. """""" test_components = ['Generator', 'Link'] assert c in test_components, 'Ramp limit constraints were only tested for Generator and Link.' rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index if rup_i.empty & rdown_i.empty: return fix_i = get_non_extendable_i(n, c) ext_i = get_extendable_i(n, c) p = get_var(n, c, 'p').loc[sns[1:]] p_prev = get_var(n, c, 'p').shift(1).loc[sns[1:]] active = get_activity_mask(n, c, sns[1:]) # fix up gens_i = rup_i.intersection(fix_i) if not gens_i.empty: lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i])) rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom') kwargs = dict(spec='nonext.', mask=active[gens_i]) define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs) # ext up gens_i = rup_i.intersection(ext_i) if not gens_i.empty: limit_pu = n.df(c)['ramp_limit_up'][gens_i] p_nom = get_var(n, c, 'p_nom')[gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom)) kwargs = dict(spec='ext.', mask=active[gens_i]) define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs) # fix down gens_i = rdown_i.intersection(fix_i) if not gens_i.empty: lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i])) rhs = n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom') kwargs = dict(spec='nonext.', mask=active[gens_i]) define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs) # ext down gens_i = rdown_i.intersection(ext_i) if not gens_i.empty: limit_pu = n.df(c)['ramp_limit_down'][gens_i] p_nom = get_var(n, c, 'p_nom')[gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom)) kwargs = dict(spec='ext.', mask=active[gens_i]) define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs) if commitable: assert c=='Generator', 'Commitable contraints were only tested for Generator.' 
com_i = n.df(c).query('committable').index.difference(ext_i) # com up gens_i = rup_i.intersection(com_i) if not gens_i.empty: limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom') limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom') status = get_var(n, c, 'status').loc[sns[1:], gens_i] status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_start - limit_up, status_prev), (- limit_start, status)) kwargs = dict(spec='com.', mask=active[gens_i]) define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs) # com down gens_i = rdown_i.intersection(com_i) if not gens_i.empty: limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom') limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom') status = get_var(n, c, 'status').loc[sns[1:], gens_i] status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i] lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_down - limit_shut, status), (limit_shut, status_prev)) kwargs = dict(spec='com.', mask=active[gens_i]) define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs) " 24703,"def _override_qos_policy_with_param(qos: QoSProfile, policy: QoSPolicyKind, param: Parameter): value = param.value policy_name = policy.name.lower() if policy in ( QoSPolicyKind.LIVELINESS, QoSPolicyKind.RELIABILITY, QoSPolicyKind.HISTORY, QoSPolicyKind.DURABILITY ): def capitalize_first_letter(x): return x[0].upper() + x[1:] # e.g. `policy=QosPolicyKind.LIVELINESS` -> `policy_enum_class=rclpy.qos.LivelinessPolicy` policy_enum_class = getattr( rclpy.qos, f'{capitalize_first_letter(policy_name)}Policy') try: value = policy_enum_class[value.upper()] except KeyError: raise RuntimeError( f'Unexpected qos override for policy `{policy.name.lower()}`: `{value}`') if policy in ( QoSPolicyKind.LIFESPAN, QoSPolicyKind.DEADLINE, QoSPolicyKind.LIVELINESS_LEASE_DURATION ): value = Duration(nanoseconds=value) setattr(qos, policy.name.lower(), value) ","def _override_qos_policy_with_param(qos: QoSProfile, policy: QoSPolicyKind, param: Parameter): value = param.value policy_name = policy.name.lower() if policy in ( QoSPolicyKind.LIVELINESS, QoSPolicyKind.RELIABILITY, QoSPolicyKind.HISTORY, QoSPolicyKind.DURABILITY ): def capitalize_first_letter(x): return x[0].upper() + x[1:] # e.g. `policy=QosPolicyKind.LIVELINESS` -> `policy_enum_class=rclpy.qos.LivelinessPolicy` policy_enum_class = getattr( rclpy.qos, f'{capitalize_first_letter(policy_name)}Policy') try: value = policy_enum_class[value.upper()] except KeyError: raise RuntimeError( f'Unexpected QoS override for policy `{policy.name.lower()}`: `{value}`') if policy in ( QoSPolicyKind.LIFESPAN, QoSPolicyKind.DEADLINE, QoSPolicyKind.LIVELINESS_LEASE_DURATION ): value = Duration(nanoseconds=value) setattr(qos, policy.name.lower(), value) " 7193,"def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=None, exclude_border=True, indices=True, num_peaks=np.inf, footprint=None, labels=None, *, num_peaks_per_label=np.inf, p=np.inf): """"""Find corners in corner measure response image. This differs from `skimage.feature.peak_local_max` in that it suppresses multiple connected peaks with the same accumulator value. Parameters ---------- image : ndarray Input image. min_distance : int, optional The minimum distance seperating peaks. Use the ``p`` argument to set the Minkowski p-norm defining the distance. * : * See :py:meth:`skimage.feature.peak_local_max`. 
p : float Which Minkowski p-norm to use. Should be in the range [1, inf]. A finite large p may cause a ValueError if overflow can occur. inf corresponds to the chebychev distance and 2 to the euclidean distance. Returns ------- output : ndarray or ndarray of bools * If `indices = True` : (row, column, ...) coordinates of peaks. * If `indices = False` : Boolean array shaped like `image`, with peaks represented by True values. See also -------- skimage.feature.peak_local_max Notes ----- The `num_peaks` limit is applied before suppression of connected peaks. If you want to limit the number of peaks after suppression, you should set `num_peaks=np.inf` and post-process the output of this function. Examples -------- >>> from skimage.feature import peak_local_max >>> response = np.zeros((5, 5)) >>> response[2:4, 2:4] = 1 >>> response array([[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 1., 0.], [0., 0., 1., 1., 0.], [0., 0., 0., 0., 0.]]) >>> peak_local_max(response) array([[3, 3], [3, 2], [2, 3], [2, 2]]) >>> corner_peaks(response) array([[3, 3]]) """""" if threshold_rel is None: threshold_rel = 0.1 warn(""Until the version 0.16, threshold_rel was set to 0.1 by default."" ""Starting from version 0.16, the default value is set to None."" ""Until version 0.18, a None value corresponds to a threshold "" ""value of 0.1. The default behavior will match "" ""skimage.feature.peak_local_max."", category=FutureWarning, stacklevel=2) # Get the coordinates of the detected peaks coords = peak_local_max(image, min_distance=min_distance, threshold_abs=threshold_abs, threshold_rel=threshold_rel, exclude_border=exclude_border, indices=True, num_peaks=num_peaks, footprint=footprint, labels=labels, num_peaks_per_label=num_peaks_per_label) if len(coords): # Use KDtree to find the peaks that are too close to each others tree = spatial.cKDTree(coords) rejected_peaks = set() for idx, point in enumerate(coords): if idx not in rejected_peaks: candidates = tree.query_ball_point(point, r=min_distance, p=p) candidates.remove(idx) rejected_peaks.update(candidates) # Remove the peaks that are too close to each others coords = np.delete(coords, tuple(rejected_peaks), axis=0)[::-1] if indices is True: return coords peaks = np.zeros_like(image, dtype=bool) peaks[tuple(coords.T)] = True return peaks ","def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=None, exclude_border=True, indices=True, num_peaks=np.inf, footprint=None, labels=None, *, num_peaks_per_label=np.inf, p=np.inf): """"""Find corners in corner measure response image. This differs from `skimage.feature.peak_local_max` in that it suppresses multiple connected peaks with the same accumulator value. Parameters ---------- image : ndarray Input image. min_distance : int, optional The minimum distance seperating peaks. Use the ``p`` argument to set the Minkowski p-norm defining the distance. * : * See :py:meth:`skimage.feature.peak_local_max`. p : float Which Minkowski p-norm to use. Should be in the range [1, inf]. A finite large p may cause a ValueError if overflow can occur. inf corresponds to the chebychev distance and 2 to the euclidean distance. Returns ------- output : ndarray or ndarray of bools * If `indices = True` : (row, column, ...) coordinates of peaks. * If `indices = False` : Boolean array shaped like `image`, with peaks represented by True values. See also -------- skimage.feature.peak_local_max Notes ----- The `num_peaks` limit is applied before suppression of connected peaks. 
If you want to limit the number of peaks after suppression, you should set `num_peaks=np.inf` and post-process the output of this function. Examples -------- >>> from skimage.feature import peak_local_max >>> response = np.zeros((5, 5)) >>> response[2:4, 2:4] = 1 >>> response array([[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 1., 0.], [0., 0., 1., 1., 0.], [0., 0., 0., 0., 0.]]) >>> peak_local_max(response) array([[3, 3], [3, 2], [2, 3], [2, 2]]) >>> corner_peaks(response) array([[3, 3]]) """""" if threshold_rel is None: threshold_rel = 0.1 warn(""Until the version 0.16, threshold_rel was set to 0.1 by default."" ""Starting from version 0.16, the default value is set to None."" ""Until version 0.18, a None value corresponds to a threshold "" ""value of 0.1. The default behavior will match "" ""skimage.feature.peak_local_max."", category=FutureWarning, stacklevel=2) # Get the coordinates of the detected peaks coords = peak_local_max(image, min_distance=min_distance, threshold_abs=threshold_abs, threshold_rel=threshold_rel, exclude_border=exclude_border, indices=True, num_peaks=num_peaks, footprint=footprint, labels=labels, num_peaks_per_label=num_peaks_per_label) if len(coords): # Use KDtree to find the peaks that are too close to each other tree = spatial.cKDTree(coords) rejected_peaks = set() for idx, point in enumerate(coords): if idx not in rejected_peaks: candidates = tree.query_ball_point(point, r=min_distance, p=p) candidates.remove(idx) rejected_peaks.update(candidates) # Remove the peaks that are too close to each others coords = np.delete(coords, tuple(rejected_peaks), axis=0)[::-1] if indices is True: return coords peaks = np.zeros_like(image, dtype=bool) peaks[tuple(coords.T)] = True return peaks " 58841,"def inv(a): """"""Computes the inverse of a matrix. This function computes matrix ``a_inv`` from n-dimensional regular matrix ``a`` such that ``dot(a, a_inv) == eye(n)``. Args: a (cupy.ndarray): The regular matrix Returns: cupy.ndarray: The inverse of a matrix. .. warning:: This function calls one or more cuSOLVER routine(s) which may yield invalid results if input conditions are not met. To detect these invalid results, you can set the `linalg` configuration to a value that is not `ignore` in :func:`cupyx.errstate` or :func:`cupyx.seterr`. .. 
seealso:: :func:`numpy.linalg.inv` """""" if a.ndim >= 3: return _batched_inv(a) _util._assert_cupy_array(a) _util._assert_rank2(a) _util._assert_nd_squareness(a) dtype, out_dtype = _util.linalg_common_type(a) order = 'F' if a._f_contiguous else 'C' # prevent 'a' to be overwritten a = a.astype(dtype, copy=True, order=order) cusolver_handle = device.get_cusolver_handle() dev_info = cupy.empty(1, dtype=numpy.int32) print('types ', a.dtype, dtype, out_dtype) ipiv = cupy.empty((a.shape[0], 1), dtype=numpy.intc) if dtype == 'f': getrf = cusolver.sgetrf getrf_bufferSize = cusolver.sgetrf_bufferSize getrs = cusolver.sgetrs elif dtype == 'd': getrf = cusolver.dgetrf getrf_bufferSize = cusolver.dgetrf_bufferSize getrs = cusolver.dgetrs elif dtype == 'F': getrf = cusolver.cgetrf getrf_bufferSize = cusolver.cgetrf_bufferSize getrs = cusolver.cgetrs elif dtype == 'D': getrf = cusolver.zgetrf getrf_bufferSize = cusolver.zgetrf_bufferSize getrs = cusolver.zgetrs else: msg = ('dtype must be float32, float64, complex64 or complex128' ' (actual: {})'.format(a.dtype)) raise ValueError(msg) m = a.shape[0] buffersize = getrf_bufferSize(cusolver_handle, m, m, a.data.ptr, m) workspace = cupy.empty(buffersize, dtype=dtype) # LU factorization getrf( cusolver_handle, m, m, a.data.ptr, m, workspace.data.ptr, ipiv.data.ptr, dev_info.data.ptr) cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( getrf, dev_info) b = cupy.eye(m, dtype=dtype) # solve for the inverse getrs( cusolver_handle, 0, m, m, a.data.ptr, m, ipiv.data.ptr, b.data.ptr, m, dev_info.data.ptr) cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( getrs, dev_info) return b.astype(out_dtype, copy=False) ","def inv(a): """"""Computes the inverse of a matrix. This function computes matrix ``a_inv`` from n-dimensional regular matrix ``a`` such that ``dot(a, a_inv) == eye(n)``. Args: a (cupy.ndarray): The regular matrix Returns: cupy.ndarray: The inverse of a matrix. .. warning:: This function calls one or more cuSOLVER routine(s) which may yield invalid results if input conditions are not met. To detect these invalid results, you can set the `linalg` configuration to a value that is not `ignore` in :func:`cupyx.errstate` or :func:`cupyx.seterr`. .. 
seealso:: :func:`numpy.linalg.inv` """""" if a.ndim >= 3: return _batched_inv(a) _util._assert_cupy_array(a) _util._assert_rank2(a) _util._assert_nd_squareness(a) dtype, out_dtype = _util.linalg_common_type(a) order = 'F' if a._f_contiguous else 'C' # prevent 'a' to be overwritten a = a.astype(dtype, copy=True, order=order) cusolver_handle = device.get_cusolver_handle() dev_info = cupy.empty(1, dtype=numpy.int32) ipiv = cupy.empty((a.shape[0], 1), dtype=numpy.intc) if dtype == 'f': getrf = cusolver.sgetrf getrf_bufferSize = cusolver.sgetrf_bufferSize getrs = cusolver.sgetrs elif dtype == 'd': getrf = cusolver.dgetrf getrf_bufferSize = cusolver.dgetrf_bufferSize getrs = cusolver.dgetrs elif dtype == 'F': getrf = cusolver.cgetrf getrf_bufferSize = cusolver.cgetrf_bufferSize getrs = cusolver.cgetrs elif dtype == 'D': getrf = cusolver.zgetrf getrf_bufferSize = cusolver.zgetrf_bufferSize getrs = cusolver.zgetrs else: msg = ('dtype must be float32, float64, complex64 or complex128' ' (actual: {})'.format(a.dtype)) raise ValueError(msg) m = a.shape[0] buffersize = getrf_bufferSize(cusolver_handle, m, m, a.data.ptr, m) workspace = cupy.empty(buffersize, dtype=dtype) # LU factorization getrf( cusolver_handle, m, m, a.data.ptr, m, workspace.data.ptr, ipiv.data.ptr, dev_info.data.ptr) cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( getrf, dev_info) b = cupy.eye(m, dtype=dtype) # solve for the inverse getrs( cusolver_handle, 0, m, m, a.data.ptr, m, ipiv.data.ptr, b.data.ptr, m, dev_info.data.ptr) cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( getrs, dev_info) return b.astype(out_dtype, copy=False) " 32124,"def test_build_search_human_readable_multi_table_in_query(mocker): """""" Given: multiple table headers in query When: building a human readable table as part of splunk-search Then: Test headers are calculated correctly: * all expected header exist without duplications """""" args = {""query"": "" table header_1, header_2 | stats state_1, state_2 | table header_1, header_2, header_3, header_4""} results = [ {'header_1': 'val_1', 'header_2': 'val_2', 'header_3': 'val_3', 'header_4': 'val_4'}, ] expected_headers_hr = ""|header_1|header_2|header_3|header_4|\n|---|---|---|---|"" hr = splunk.build_search_human_readable(args, results) assert expected_headers_hr in hr ","def test_build_search_human_readable_multi_table_in_query(mocker): """""" Given: multiple table headers in query When: building a human readable table as part of splunk-search Then: Test headers are calculated correctly: * all expected header exist without duplications """""" args = {""query"": "" table header_1, header_2 | stats state_1, state_2 | table header_1, header_2, header_3, header_4""} results = [ {'header_1': 'val_1', 'header_2': 'val_2', 'header_3': 'val_3', 'header_4': 'val_4'}, ] expected_headers_hr = ""|header_1|header_2|header_3|header_4|\n|---|---|---|---|"" assert expected_headers_hr in splunk.build_search_human_readable(args, results) " 13146,"def test_list_project_packages(project, resp_list_package_files): package = project.packages.get(1, lazy=True) package_files = package.package_files.list() assert isinstance(package_files, list) assert isinstance(package_files[0], ProjectPackageFile) assert package_files[0].id == 25 ","def test_list_project_package_files(project, resp_list_package_files): package = project.packages.get(1, lazy=True) package_files = package.package_files.list() assert isinstance(package_files, list) assert isinstance(package_files[0], 
ProjectPackageFile) assert package_files[0].id == 25 " 47970,"def compile(reporter, compiler_path, model, model_precision, args, output_dir): (output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True) compile_cmd = [str(compiler_path), '-m={}'.format(args.model_dir / model.subdirectory / model_precision / (model.name + '.xml')), '-d={}'.format(args.target_device), '-ip={}'.format('U8' if args.input_precision is None else args.input_precision), '-op={}'.format('FP32' if args.output_precision is None else args.output_precision), '-o={}'.format(output_dir / model.subdirectory / model_precision /(model.name + '.blob')), ] reporter.print_section_heading('{}Compiling {} to BLOB ({})', '(DRY RUN) ' if args.dry_run else '', model.name, model_precision) reporter.print('Conversion command: {}', common.command_string(compile_cmd)) if not args.dry_run: reporter.print(flush=True) if not reporter.job_context.subprocess(compile_cmd): return False reporter.print() return True ","def compile(reporter, compiler_path, model, model_precision, args, output_dir): (output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True) compile_cmd = [str(compiler_path), '-m={}'.format(args.model_dir / model.subdirectory / model_precision / (model.name + '.xml')), '-d={}'.format(args.target_device), '-ip={}'.format('U8' if args.input_precision is None else args.input_precision), '-op={}'.format('FP32' if args.output_precision is None else args.output_precision), '-o={}'.format(output_dir / model.subdirectory / model_precision / (model.name + '.blob')), ] reporter.print_section_heading('{}Compiling {} to BLOB ({})', '(DRY RUN) ' if args.dry_run else '', model.name, model_precision) reporter.print('Conversion command: {}', common.command_string(compile_cmd)) if not args.dry_run: reporter.print(flush=True) if not reporter.job_context.subprocess(compile_cmd): return False reporter.print() return True " 50356,"def _get_params(func: Callable) -> Dict[str, inspect.Parameter]: """"""Get non-vararg parameters of `func` as an ordered dict."""""" var_param_kinds = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD) try: params = list(inspect.signature(func).parameters.values()) except Exception: # `inspect.signature` doesn't work on ufunc objects, but we can work out # what the required parameters would look like if it did. if not _is_probably_ufunc(func): raise # Note that we use args named a, b, c... to match the `operator` module, # rather than x1, x2, x3... like the Numpy docs. Because they're pos-only # this doesn't make a runtime difference, and it's much nicer for use-cases # like `equivalent(numpy.add, operator.add)`. params = [ inspect.Parameter(name=name, kind=inspect.Parameter.POSITIONAL_ONLY) for name in ascii_lowercase[: func.nin] # type: ignore ] return OrderedDict((p.name, p) for p in params if p.kind not in var_param_kinds) ","def _get_params(func: Callable) -> Dict[str, inspect.Parameter]: """"""Get non-vararg parameters of `func` as an ordered dict."""""" var_param_kinds = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD) try: params = list(inspect.signature(func).parameters.values()) except Exception as e: # `inspect.signature` doesn't work on ufunc objects, but we can work out # what the required parameters would look like if it did. if not _is_probably_ufunc(func): raise e # Note that we use args named a, b, c... to match the `operator` module, # rather than x1, x2, x3... like the Numpy docs. 
Because they're pos-only # this doesn't make a runtime difference, and it's much nicer for use-cases # like `equivalent(numpy.add, operator.add)`. params = [ inspect.Parameter(name=name, kind=inspect.Parameter.POSITIONAL_ONLY) for name in ascii_lowercase[: func.nin] # type: ignore ] return OrderedDict((p.name, p) for p in params if p.kind not in var_param_kinds) " 31930,"def main(): incident = demisto.incident() if not incident: raise ValueError(""Error - demisto.incident() expected to return current incident "" ""from context but returned None"") custom_fields = incident.get('CustomFields', {}) asset_results_str = custom_fields.get('assettable', {}) is_successful = custom_fields.get('successfulidentityenrichment', '') if is_successful == 'false': return {'ContentsFormat': formats['markdown'], 'Contents': 'Asset enrichment failed.'} asset_results = json.loads(asset_results_str) if not asset_results: return {'ContentsFormat': formats['markdown'], 'Contents': 'No assets were found in notable'} if isinstance(asset_results, list): events_arr = [] for event in asset_results: events_arr.append(event) markdown = tableToMarkdown("""", events_arr, headers=events_arr[0].keys()) else: markdown = tableToMarkdown("""", asset_results) return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown} ","def main(): incident = demisto.incident() if not incident: raise ValueError(""Error - demisto.incident() expected to return current incident "" ""from context but returned None"") custom_fields = incident.get('CustomFields', {}) asset_results_str = custom_fields.get('assettable', {}) is_successful = custom_fields.get('successfulidentityenrichment', '') if is_successful == 'false': return {'ContentsFormat': formats['markdown'], 'Contents': 'Asset enrichment failed.'} asset_results = json.loads(asset_results_str) if not asset_results: return {'ContentsFormat': formats['markdown'], 'Contents': 'No assets were found in the notable'} if isinstance(asset_results, list): events_arr = [] for event in asset_results: events_arr.append(event) markdown = tableToMarkdown("""", events_arr, headers=events_arr[0].keys()) else: markdown = tableToMarkdown("""", asset_results) return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown} " 27959,"def ctu_mapping(clang_version_info): """"""Clang version dependent ctu mapping tool path and mapping file name. The path of the mapping tool, which is assumed to be located inside the installed directory of the analyzer. Certain binary distributions can postfix the the tool name with the major version number, the the number and the tool name being separated by a dash. By default the shorter name is looked up, then if it is not found the postfixed. 
"""""" if not clang_version_info: LOG.debug(""No clang version information"" ""can not detect ctu mapping tool."") return None, None old_mapping_tool_name = 'clang-func-mapping' old_mapping_file_name = 'externalFnMap.txt' new_mapping_tool_name = 'clang-extdef-mapping' new_mapping_file_name = 'externalDefMap.txt' major_version = clang_version_info.major_version if major_version > 7: tool_name = new_mapping_tool_name mapping_file = new_mapping_file_name else: tool_name = old_mapping_tool_name mapping_file = old_mapping_file_name installed_dir = clang_version_info.installed_dir tool_path = os.path.join(installed_dir, tool_name) if os.path.isfile(tool_path): return tool_path, mapping_file LOG.debug( ""Mapping tool '{}' suggested by autodetection is not found in "" ""directory reported by Clang '{}'. Trying with version-postfixed "" ""filename..."".format(tool_path, installed_dir)) postfixed_tool_path = ''.join([tool_path, '-', str(major_version)]) if os.path.isfile(postfixed_tool_path): return postfixed_tool_path, mapping_file LOG.debug( ""Postfixed mapping tool '{}' suggested by autodetection is not "" ""found in directory reported by Clang '{}'."" .format(postfixed_tool_path, installed_dir)) return None, None ","def ctu_mapping(clang_version_info): """"""Clang version dependent ctu mapping tool path and mapping file name. The path of the mapping tool, which is assumed to be located inside the installed directory of the analyzer. Certain binary distributions can postfix the the tool name with the major version number, the the number and the tool name being separated by a dash. By default the shorter name is looked up, then if it is not found the postfixed. """""" if not clang_version_info: LOG.debug(""No clang version information"" ""Can not detect ctu mapping tool."") return None, None old_mapping_tool_name = 'clang-func-mapping' old_mapping_file_name = 'externalFnMap.txt' new_mapping_tool_name = 'clang-extdef-mapping' new_mapping_file_name = 'externalDefMap.txt' major_version = clang_version_info.major_version if major_version > 7: tool_name = new_mapping_tool_name mapping_file = new_mapping_file_name else: tool_name = old_mapping_tool_name mapping_file = old_mapping_file_name installed_dir = clang_version_info.installed_dir tool_path = os.path.join(installed_dir, tool_name) if os.path.isfile(tool_path): return tool_path, mapping_file LOG.debug( ""Mapping tool '{}' suggested by autodetection is not found in "" ""directory reported by Clang '{}'. Trying with version-postfixed "" ""filename..."".format(tool_path, installed_dir)) postfixed_tool_path = ''.join([tool_path, '-', str(major_version)]) if os.path.isfile(postfixed_tool_path): return postfixed_tool_path, mapping_file LOG.debug( ""Postfixed mapping tool '{}' suggested by autodetection is not "" ""found in directory reported by Clang '{}'."" .format(postfixed_tool_path, installed_dir)) return None, None " 58823,"def axpy(a, x, y): """"""Computes y += a * x. (*) y will be updated. """""" _check_two_vectors(x, y) dtype = x.dtype.char if dtype == 'f': func = cublas.saxpy elif dtype == 'd': func = cublas.daxpy elif dtype == 'F': func = cublas.caxpy elif dtype == 'D': func = cublas.zaxpy else: raise TypeError('invalid dtype') handle = device.get_cublas_handle() a_ptr, mode = _setup_scalar_ptr(handle, a, dtype) func(handle, x.size, a_ptr, x.data.ptr, 1, y.data.ptr, 1) cublas.setPointerMode(handle, mode) return y ","def axpy(a, x, y): """"""Computes y += a * x. (*) y will be updated. 
"""""" _check_two_vectors(x, y) dtype = x.dtype.char if dtype == 'f': func = cublas.saxpy elif dtype == 'd': func = cublas.daxpy elif dtype == 'F': func = cublas.caxpy elif dtype == 'D': func = cublas.zaxpy else: raise TypeError('invalid dtype') handle = device.get_cublas_handle() a_ptr, orig_mode = _setup_scalar_ptr(handle, a, dtype) func(handle, x.size, a_ptr, x.data.ptr, 1, y.data.ptr, 1) cublas.setPointerMode(handle, orig_mode) return y " 43698,"def max_independent_set(graph, constrained=True): r""""""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the MaxIndependentSet problem, for a given graph. The goal of MaxIndependentSet is to find the largest possible independent set of a graph. Given some graph :math:`G`, an independent set of :math:`G` is a set of vertices such that no two of the vertices in the set share a common edge. Args: graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained) Returns: (.Hamiltonian, .Hamiltonian): .. UsageDetails:: There are two variations of QAOA for this problem, constrained and unconstrained: **Constrained** .. note:: This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas in `[arXiv:1709.03489] `__. The constrained MaxIndependentSet cost Hamiltonian is defined as: .. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v} where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th vertex. The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`. .. note:: **Recommended initialization circuit:** Each wire in the :math:`|0\rangle` state **Unconstrained** The unconstrained MaxIndependentSet cost Hamiltonian is defined as: .. math:: H_C \ = \ \frac{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i where :math:`E(G)` is the edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th vertex. The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires. .. note:: **Recommended initialization circuit:** Even superposition over all basis states """""" if not isinstance(graph, nx.Graph): raise ValueError(""Input graph must be a nx.Graph, got {}"".format(type(graph).__name__)) if constrained: return (bit_driver(graph.nodes, 1), qaoa.bit_flip_mixer(graph, 0)) cost_h = edge_driver(graph, ['10', '01', '00']) + bit_driver(graph.nodes, 1) mixer_h = qaoa.x_mixer(graph.nodes) return (cost_h, mixer_h) ","def max_independent_set(graph, constrained=True): r""""""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the MaxIndependentSet problem, for a given graph. Given some graph :math:`G`, an independent set is a set of vertices such that no two of the vertices in the set share a common edge. The goal of MaxIndependentSet is to find the largest such set. Args: graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained) Returns: (.Hamiltonian, .Hamiltonian): .. UsageDetails:: There are two variations of QAOA for this problem, constrained and unconstrained: **Constrained** .. 
note:: This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas in `[arXiv:1709.03489] `__. The constrained MaxIndependentSet cost Hamiltonian is defined as: .. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v} where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th vertex. The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`. .. note:: **Recommended initialization circuit:** Each wire in the :math:`|0\rangle` state **Unconstrained** The unconstrained MaxIndependentSet cost Hamiltonian is defined as: .. math:: H_C \ = \ \frac{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i where :math:`E(G)` is the edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th vertex. The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires. .. note:: **Recommended initialization circuit:** Even superposition over all basis states """""" if not isinstance(graph, nx.Graph): raise ValueError(""Input graph must be a nx.Graph, got {}"".format(type(graph).__name__)) if constrained: return (bit_driver(graph.nodes, 1), qaoa.bit_flip_mixer(graph, 0)) cost_h = edge_driver(graph, ['10', '01', '00']) + bit_driver(graph.nodes, 1) mixer_h = qaoa.x_mixer(graph.nodes) return (cost_h, mixer_h) " 17456,"def guess_engine(store_spec): engines = list_engines() for engine, backend in engines.items(): try: if backend.guess_can_open(store_spec): return engine except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) compatible = [] for engine, backend_cls in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if backend.guess_can_open(store_spec): compatible.append(engine) except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) installed = [k for k in engines if k != ""store""] if not compatible: if installed: error_msg = ( ""did not find a match in any of xarray's currently installed IO "" f""backends {installed}. Consider explicitly selecting one of the "" ""installed engines via the ``engine`` parameter, or installing "" ""additional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html"" ) else: error_msg = ( ""xarray is unable to open this file because it has no currently "" ""installed IO backends. Xarray's read/write support requires "" ""installing optional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io"" ) else: error_msg = ( ""found the following matches with the input file in xarray's IO "" f""backends: {compatible}. 
But their dependencies may not be installed, see:\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html \n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html"" ) raise ValueError(error_msg) ","def guess_engine(store_spec): engines = list_engines() for engine, backend in engines.items(): try: if backend.guess_can_open(store_spec): return engine except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) compatible = [] for engine, backend_cls in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if backend.guess_can_open(store_spec): compatible.append(engine) except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) installed = [k for k in engines if k != ""store""] if not compatible: if installed: error_msg = ( ""did not find a match in any of xarray's currently installed IO "" f""backends {installed}. Consider explicitly selecting one of the "" ""installed engines via the ``engine`` parameter, or installing "" ""additional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html"" ) else: error_msg = ( ""xarray is unable to open this file because it has no currently "" ""installed IO backends. Xarray's read/write support requires "" ""installing optional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io"" ) else: error_msg = ( ""found the following matches with the input file in xarray's IO "" f""backends: {compatible}. But they may not be installed, see:\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html \n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html"" ) raise ValueError(error_msg) " 28793,"def _transform_overwrites(entry, data): overwrites = [] for elem in data: allow = Permissions(elem['allow']) deny = Permissions(elem['deny']) ow = PermissionOverwrite.from_pair(allow, deny) ow_type = elem['type'] ow_id = int(elem['id']) if ow_type == str(0): target = entry.guild.get_role(ow_id) else: target = entry._get_member(ow_id) if target is None: target = Object(id=ow_id) overwrites.append((target, ow)) return overwrites ","def _transform_overwrites(entry, data): overwrites = [] for elem in data: allow = Permissions(elem['allow']) deny = Permissions(elem['deny']) ow = PermissionOverwrite.from_pair(allow, deny) ow_type = elem['type'] ow_id = int(elem['id']) if ow_type == ""0"": target = entry.guild.get_role(ow_id) else: target = entry._get_member(ow_id) if target is None: target = Object(id=ow_id) overwrites.append((target, ow)) return overwrites " 7119,"def blind_richardson_lucy(image, psf=None, iterations=10, return_iterations=False, clip=False): """"""Blind Richardson-Lucy deconvolution. Parameters ---------- image : ndarray Input degraded image (can be N dimensional). psf : ndarray, optional A first estimate of the point spread function, same size as image iterations : int Number of iterations. This parameter plays the role of regularisation. After a given iterations, the estimates can produce division by 0 problems, then the algorithm is automatically stopped. return_iterations : boolean, optional Returns instead of a tuple of the final restorated image and used PSF a tuple of all iterations for further investigation clip : boolean, optional True by default. 
If true, pixel value of the result above 1 or under -1 are thresholded for skimage pipeline compatibility. Returns ------- im_deconv : ndarray The deconvolved image. psf : ndarray The last PSF estimate to deconvolve image Examples -------- >>> from skimage.restoration import blind_richardson_lucy >>> image = np.zeros((100,100)) >>> im[40:60, 45:55] = 1 >>> im[45:55, 40:60] = 1 >>> psf = np.zeros_like(image) >>> psf[50,50] = 1 >>> psf = gaussian(psf, 2) >>> image_conv = convolve2d(image, psf, 'same') >>> deconvolved, calc_psf = blind_richardson_lucy(image_conv, 10) Notes ----- This function estimates a point spread function based on an inverse Richardson Lucy algorithm as described in Fish et al., 1995. It is an iterative process where the PSF and image is deconvolved, respectively. It is more noise tolerant than other algorithms, such as Ayers-Dainty and the Weiner filter algorithms (taken from the paper). The algorithm performs well with gaussian PSFs and can recover them nicely without any prior knowledge. If one has already an educated guess, one should pass the PSF as argument to the function. Note, that the PSF should have the same shape as the image, and the PSF should be centered. Due to its nature, the algorithm may divide by 0. The function catches this issue and aborts the iterative process. Mostly, the optimal number of iterations is before this error may occur. References ---------- .. [1] Fish, D. A., A. M. Brinicombe, E. R. Pike, and J. G. Walker. ""Blind deconvolution by means of the Richardson–Lucy algorithm."" JOSA A 12, no. 1 (1995): 58-65. https://pdfs.semanticscholar.org/9e3f/a71e22caf358dbe873e9649f08c205d0c0c0.pdf """""" if return_iterations: all_iterations = np.empty((iterations, 2,) + image.shape) # Convert image to float for computations image = image.astype(np.float) # Initialize im_deconv and PSF im_deconv = np.full(image.shape, 0.5) if psf is None: psf = np.full(image.shape, 0.5) else: assert psf.shape == image.shape, \ 'Image and PSF should have the same shape!' psf = psf.astype(np.float) for i in range(iterations): # Deconvolve the PSF # Hack: in original publication one would have used `image`, # however, this does not work. # Using `im_deconv` instead recovers PSF. relative_blur_psf = im_deconv / fftconvolve(psf, im_deconv, 'same') # Check if relative_blur_psf contains nan, # causing the algorithm to fail if np.count_nonzero(~np.isnan(relative_blur_psf)) \ < relative_blur_psf.size: warnings.warn('Iterations stopped after {} iterations' ' because PSF contains zeros!'.format(i), RuntimeWarning) break else: psf *= fftconvolve(relative_blur_psf, im_deconv[::-1, ::-1], 'same') # Compute inverse again psf_mirror = psf[::-1, ::-1] # Standard Richardson-Lucy deconvolution relative_blur = image / fftconvolve(im_deconv, psf, 'same') im_deconv *= fftconvolve(relative_blur, psf_mirror, 'same') # Add iteration to list, if desired if return_iterations: all_iterations[i, 0] = im_deconv.copy() all_iterations[i, 1] = psf.copy() # Don't know if this makes sense here... if clip: im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv < -1] = -1 if return_iterations: return all_iterations else: return im_deconv, psf ","def blind_richardson_lucy(image, psf=None, iterations=10, return_iterations=False, clip=False): """"""Blind Richardson-Lucy deconvolution. Parameters ---------- image : ndarray Input degraded image (can be N dimensional). psf : ndarray, optional A first estimate of the point spread function, same size as image iterations : int Number of iterations. 
This parameter plays the role of regularisation. After a given iterations, the estimates can produce division by 0 problems, then the algorithm is automatically stopped. return_iterations : boolean, optional Returns instead of a tuple of the final restorated image and used PSF a tuple of all iterations for further investigation. clip : boolean, optional True by default. If true, pixel value of the result above 1 or under -1 are thresholded for skimage pipeline compatibility. Returns ------- im_deconv : ndarray The deconvolved image. psf : ndarray The last PSF estimate to deconvolve image Examples -------- >>> from skimage.restoration import blind_richardson_lucy >>> image = np.zeros((100,100)) >>> im[40:60, 45:55] = 1 >>> im[45:55, 40:60] = 1 >>> psf = np.zeros_like(image) >>> psf[50,50] = 1 >>> psf = gaussian(psf, 2) >>> image_conv = convolve2d(image, psf, 'same') >>> deconvolved, calc_psf = blind_richardson_lucy(image_conv, 10) Notes ----- This function estimates a point spread function based on an inverse Richardson Lucy algorithm as described in Fish et al., 1995. It is an iterative process where the PSF and image is deconvolved, respectively. It is more noise tolerant than other algorithms, such as Ayers-Dainty and the Weiner filter algorithms (taken from the paper). The algorithm performs well with gaussian PSFs and can recover them nicely without any prior knowledge. If one has already an educated guess, one should pass the PSF as argument to the function. Note, that the PSF should have the same shape as the image, and the PSF should be centered. Due to its nature, the algorithm may divide by 0. The function catches this issue and aborts the iterative process. Mostly, the optimal number of iterations is before this error may occur. References ---------- .. [1] Fish, D. A., A. M. Brinicombe, E. R. Pike, and J. G. Walker. ""Blind deconvolution by means of the Richardson–Lucy algorithm."" JOSA A 12, no. 1 (1995): 58-65. https://pdfs.semanticscholar.org/9e3f/a71e22caf358dbe873e9649f08c205d0c0c0.pdf """""" if return_iterations: all_iterations = np.empty((iterations, 2,) + image.shape) # Convert image to float for computations image = image.astype(np.float) # Initialize im_deconv and PSF im_deconv = np.full(image.shape, 0.5) if psf is None: psf = np.full(image.shape, 0.5) else: assert psf.shape == image.shape, \ 'Image and PSF should have the same shape!' psf = psf.astype(np.float) for i in range(iterations): # Deconvolve the PSF # Hack: in original publication one would have used `image`, # however, this does not work. # Using `im_deconv` instead recovers PSF. relative_blur_psf = im_deconv / fftconvolve(psf, im_deconv, 'same') # Check if relative_blur_psf contains nan, # causing the algorithm to fail if np.count_nonzero(~np.isnan(relative_blur_psf)) \ < relative_blur_psf.size: warnings.warn('Iterations stopped after {} iterations' ' because PSF contains zeros!'.format(i), RuntimeWarning) break else: psf *= fftconvolve(relative_blur_psf, im_deconv[::-1, ::-1], 'same') # Compute inverse again psf_mirror = psf[::-1, ::-1] # Standard Richardson-Lucy deconvolution relative_blur = image / fftconvolve(im_deconv, psf, 'same') im_deconv *= fftconvolve(relative_blur, psf_mirror, 'same') # Add iteration to list, if desired if return_iterations: all_iterations[i, 0] = im_deconv.copy() all_iterations[i, 1] = psf.copy() # Don't know if this makes sense here... 
if clip: im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv < -1] = -1 if return_iterations: return all_iterations else: return im_deconv, psf " 3717,"def _fastCopyAndTranspose(type, *arrays): cast_arrays = () for a in arrays: if not a.dtype.type is type: a = a.astype(type) cast_arrays = cast_arrays + (_fastCT(a),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays ","def _fastCopyAndTranspose(type, *arrays): cast_arrays = () for a in arrays: if a.dtype.type is not type: a = a.astype(type) cast_arrays = cast_arrays + (_fastCT(a),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays " 14918,"def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the platform."""""" # Validate credentials by processing image. api_key = config[CONF_API_KEY] account_type = config[CONF_ACCOUNT_TYPE] api = hound.cloud(api_key, account_type) try: api.detect(b""Test"") except hound.SimplehoundException as exc: _LOGGER.error(""Sighthound error %s setup aborted"", exc) return save_file_folder = config.get(CONF_SAVE_FILE_FOLDER) if save_file_folder: save_file_folder = os.path.join(save_file_folder, """") # If no trailing / add it entities = [] for camera in config[CONF_SOURCE]: sighthound = SighthoundEntity( api, camera[CONF_ENTITY_ID], camera.get(CONF_NAME), save_file_folder ) entities.append(sighthound) add_entities(entities) ","def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the platform."""""" # Validate credentials by processing image. api_key = config[CONF_API_KEY] account_type = config[CONF_ACCOUNT_TYPE] api = hound.cloud(api_key, account_type) try: api.detect(b""Test"") except hound.SimplehoundException as exc: _LOGGER.error(""Sighthound error %s setup aborted"", exc) return save_file_folder = config.get(CONF_SAVE_FILE_FOLDER) if save_file_folder: save_file_folder = Path(save_file_folder) entities = [] for camera in config[CONF_SOURCE]: sighthound = SighthoundEntity( api, camera[CONF_ENTITY_ID], camera.get(CONF_NAME), save_file_folder ) entities.append(sighthound) add_entities(entities) " 31655,"def main(): params = demisto.params() args = demisto.args() url = params.get('url') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) headers = {} mock_data = str(args.get('mock-data', '')) if mock_data.lower() == ""true"": headers['Mock-Data'] = ""True"" headers['Authorization'] = f'Bearer {params[""api_key""]}' headers['Soar-Integration-Origin'] = ""Cortex XSOAR"" command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) commands = { 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case': check_the_status_of_an_action_requested_on_a_case_command, 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat': check_the_status_of_an_action_requested_on_a_threat_command, 'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security': get_a_list_of_abnormal_cases_identified_by_abnormal_security_command, 'abxcortexxsoar-get-a-list-of-threats': get_a_list_of_threats_command, 'abxcortexxsoar-get-details-of-a-threat': get_details_of_a_threat_command, 'abxcortexxsoar-get-details-of-an-abnormal-case': get_details_of_an_abnormal_case_command, 'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command, 
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security': manage_a_threat_identified_by_abnormal_security_command, 'abxcortexxsoar-manage-an-abnormal-case': manage_an_abnormal_case_command, 'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security': submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command, } if command == 'test-module': headers['Mock-Data'] = ""True"" test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) test_module(test_client) elif command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(str(e)) ","def main(): params = demisto.params() args = demisto.args() url = params.get('url') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) headers = {} mock_data = str(args.get('mock-data', '')) if mock_data.lower() == ""true"": headers['Mock-Data'] = ""True"" headers['Authorization'] = f'Bearer {params[""api_key""]}' headers['Soar-Integration-Origin'] = ""Cortex XSOAR"" command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) commands = { 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case': check_the_status_of_an_action_requested_on_a_case_command, 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat': check_the_status_of_an_action_requested_on_a_threat_command, 'abnormal-security-list-abnormal-cases-identified-by-abnormal-security': get_a_list_of_abnormal_cases_identified_by_abnormal_security_command, 'abxcortexxsoar-get-a-list-of-threats': get_a_list_of_threats_command, 'abxcortexxsoar-get-details-of-a-threat': get_details_of_a_threat_command, 'abxcortexxsoar-get-details-of-an-abnormal-case': get_details_of_an_abnormal_case_command, 'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command, 'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security': manage_a_threat_identified_by_abnormal_security_command, 'abxcortexxsoar-manage-an-abnormal-case': manage_an_abnormal_case_command, 'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security': submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command, } if command == 'test-module': headers['Mock-Data'] = ""True"" test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) test_module(test_client) elif command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(str(e)) " 48938,"def get_placeholders(provider): placeholders = dict(os.environ) placeholders.setdefault('PGHOME', os.path.expanduser('~')) placeholders.setdefault('APIPORT', '8008') placeholders.setdefault('BACKUP_SCHEDULE', '0 1 * * *') placeholders.setdefault('BACKUP_NUM_TO_RETAIN', '5') placeholders.setdefault('CRONTAB', '[]') placeholders.setdefault('PGROOT', os.path.join(placeholders['PGHOME'], 'pgroot')) placeholders.setdefault('WALE_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp'))) placeholders.setdefault('PGDATA', os.path.join(placeholders['PGROOT'], 'pgdata')) placeholders.setdefault('HUMAN_ROLE', 'zalandos') 
placeholders.setdefault('PGUSER_STANDBY', 'standby') placeholders.setdefault('PGPASSWORD_STANDBY', 'standby') placeholders.setdefault('USE_ADMIN', 'PGPASSWORD_ADMIN' in placeholders) placeholders.setdefault('PGUSER_ADMIN', 'admin') placeholders.setdefault('PGPASSWORD_ADMIN', 'cola') placeholders.setdefault('PGUSER_SUPERUSER', 'postgres') placeholders.setdefault('PGPASSWORD_SUPERUSER', 'zalando') placeholders.setdefault('ALLOW_NOSSL', '') placeholders.setdefault('BGMON_LISTEN_IP', '0.0.0.0') placeholders.setdefault('PGPORT', '5432') placeholders.setdefault('SCOPE', 'dummy') placeholders.setdefault('RW_DIR', RW_DIR) placeholders.setdefault('SSL_TEST_RELOAD', 'SSL_PRIVATE_KEY_FILE' in os.environ) placeholders.setdefault('SSL_CA_FILE', '') placeholders.setdefault('SSL_CRL_FILE', '') placeholders.setdefault('SSL_CERTIFICATE_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.crt')) placeholders.setdefault('SSL_PRIVATE_KEY_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.key')) placeholders.setdefault('SSL_RESTAPI_CA_FILE', '') placeholders.setdefault('SSL_RESTAPI_CERTIFICATE_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'rest-api-server.crt')) placeholders.setdefault('SSL_RESTAPI_PRIVATE_KEY_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'restapi-api-server.key')) placeholders.setdefault('WALE_BACKUP_THRESHOLD_MEGABYTES', 102400) placeholders.setdefault('WALE_BACKUP_THRESHOLD_PERCENTAGE', 30) placeholders.setdefault('INITDB_LOCALE', 'en_US') # if Kubernetes is defined as a DCS, derive the namespace from the POD_NAMESPACE, if not set explicitely. # We only do this for Kubernetes DCS, as we don't want to suddently change, i.e. DCS base path when running # in Kubernetes with Etcd in a non-default namespace placeholders.setdefault('NAMESPACE', placeholders.get('POD_NAMESPACE', 'default') if USE_KUBERNETES and placeholders.get('DCS_ENABLE_KUBERNETES_API') else '') # use namespaces to set WAL bucket prefix scope naming the folder namespace-clustername for non-default namespace. 
placeholders.setdefault('WAL_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE']) if placeholders['NAMESPACE'] not in ('default', '') else '') placeholders.setdefault('WAL_BUCKET_SCOPE_SUFFIX', '') placeholders.setdefault('WALE_ENV_DIR', os.path.join(placeholders['RW_DIR'], 'etc', 'wal-e.d', 'env')) placeholders.setdefault('USE_WALE', False) cpu_count = str(min(psutil.cpu_count(), 10)) placeholders.setdefault('WALG_DOWNLOAD_CONCURRENCY', cpu_count) placeholders.setdefault('WALG_UPLOAD_CONCURRENCY', cpu_count) placeholders.setdefault('PAM_OAUTH2', '') placeholders.setdefault('CALLBACK_SCRIPT', '') placeholders.setdefault('DCS_ENABLE_KUBERNETES_API', '') placeholders.setdefault('KUBERNETES_ROLE_LABEL', 'spilo-role') placeholders.setdefault('KUBERNETES_SCOPE_LABEL', 'version') placeholders.setdefault('KUBERNETES_LABELS', KUBERNETES_DEFAULT_LABELS) placeholders.setdefault('KUBERNETES_USE_CONFIGMAPS', '') placeholders.setdefault('KUBERNETES_BYPASS_API_SERVICE', 'true') placeholders.setdefault('USE_PAUSE_AT_RECOVERY_TARGET', False) placeholders.setdefault('CLONE_METHOD', '') placeholders.setdefault('CLONE_WITH_WALE', '') placeholders.setdefault('CLONE_WITH_BASEBACKUP', '') placeholders.setdefault('CLONE_TARGET_TIME', '') placeholders.setdefault('CLONE_TARGET_INCLUSIVE', True) placeholders.setdefault('LOG_SHIP_SCHEDULE', '1 0 * * *') placeholders.setdefault('LOG_S3_BUCKET', '') placeholders.setdefault('LOG_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp'))) placeholders.setdefault('LOG_BUCKET_SCOPE_SUFFIX', '') # see comment for wal-e bucket prefix placeholders.setdefault('LOG_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE']) if placeholders['NAMESPACE'] not in ('default', '') else '') if placeholders['CLONE_METHOD'] == 'CLONE_WITH_WALE': # modify placeholders and take care of error cases name = set_extended_wale_placeholders(placeholders, 'CLONE_') if name is False: logging.warning('Cloning with WAL-E is only possible when CLONE_WALE_*_PREFIX ' 'or CLONE_WALG_*_PREFIX or CLONE_WAL_*_BUCKET and CLONE_SCOPE are set.') elif name == 'S3': placeholders.setdefault('CLONE_USE_WALG', 'true') elif placeholders['CLONE_METHOD'] == 'CLONE_WITH_BASEBACKUP': clone_scope = placeholders.get('CLONE_SCOPE') if clone_scope and placeholders.get('CLONE_HOST') \ and placeholders.get('CLONE_USER') and placeholders.get('CLONE_PASSWORD'): placeholders['CLONE_WITH_BASEBACKUP'] = True placeholders.setdefault('CLONE_PGPASS', os.path.join(placeholders['PGHOME'], '.pgpass_{0}'.format(clone_scope))) placeholders.setdefault('CLONE_PORT', 5432) else: logging.warning(""Clone method is set to basebackup, but no 'CLONE_SCOPE' "" ""or 'CLONE_HOST' or 'CLONE_USER' or 'CLONE_PASSWORD' specified"") else: if set_extended_wale_placeholders(placeholders, 'STANDBY_') == 'S3': placeholders.setdefault('STANDBY_USE_WALG', 'true') placeholders.setdefault('STANDBY_WITH_WALE', '') placeholders.setdefault('STANDBY_HOST', '') placeholders.setdefault('STANDBY_PORT', '') placeholders.setdefault('STANDBY_CLUSTER', placeholders['STANDBY_WITH_WALE'] or placeholders['STANDBY_HOST']) if provider == PROVIDER_AWS and not USE_KUBERNETES: # AWS specific callback to tag the instances with roles placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_aws.py' if placeholders.get('EIP_ALLOCATION'): placeholders['CALLBACK_SCRIPT'] += ' ' + placeholders['EIP_ALLOCATION'] if any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE): placeholders.setdefault('USE_WALG_RESTORE', 'true') if 
placeholders.get('WALG_AZ_PREFIX'): placeholders.setdefault('USE_WALG_BACKUP', 'true') if all(placeholders.get(n) for n in WALG_SSH_NAMES): placeholders.setdefault('USE_WALG_BACKUP', 'true') set_walg_placeholders(placeholders) placeholders['USE_WALE'] = any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE + ('WAL_SWIFT_BUCKET', 'WALE_SWIFT_PREFIX', 'WAL_GCS_BUCKET', 'WAL_GS_BUCKET', 'WALE_GS_PREFIX', 'WALG_GS_PREFIX')) if placeholders.get('WALG_BACKUP_FROM_REPLICA'): placeholders['WALG_BACKUP_FROM_REPLICA'] = str(placeholders['WALG_BACKUP_FROM_REPLICA']).lower() # Kubernetes requires a callback to change the labels in order to point to the new master if USE_KUBERNETES: if not placeholders.get('DCS_ENABLE_KUBERNETES_API'): placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_role.py' placeholders.setdefault('postgresql', {}) placeholders['postgresql'].setdefault('parameters', {}) placeholders['WALE_BINARY'] = 'wal-g' if placeholders.get('USE_WALG_BACKUP') == 'true' else 'wal-e' placeholders['postgresql']['parameters']['archive_command'] = \ 'envdir ""{WALE_ENV_DIR}"" {WALE_BINARY} wal-push ""%p""'.format(**placeholders) \ if placeholders['USE_WALE'] else '/bin/true' if os.path.exists(MEMORY_LIMIT_IN_BYTES_PATH): with open(MEMORY_LIMIT_IN_BYTES_PATH) as f: os_memory_mb = int(f.read()) / 1048576 else: os_memory_mb = sys.maxsize os_memory_mb = min(os_memory_mb, os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / 1048576) # Depending on environment we take 1/4 or 1/5 of the memory, expressed in full MB's sb_ratio = 5 if USE_KUBERNETES else 4 placeholders['postgresql']['parameters']['shared_buffers'] = '{}MB'.format(int(os_memory_mb/sb_ratio)) # # 1 connection per 30 MB, at least 100, at most 1000 placeholders['postgresql']['parameters']['max_connections'] = min(max(100, int(os_memory_mb/30)), 1000) placeholders['instance_data'] = get_instance_metadata(provider) placeholders['BGMON_LISTEN_IP'] = get_listen_ip() if 'SSL_CA' in placeholders and placeholders['SSL_CA_FILE'] == '': placeholders['SSL_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'ca.crt') if 'SSL_CRL' in placeholders and placeholders['SSL_CRL_FILE'] == '': placeholders['SSL_CRL_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'server.crl') ssl_keys = ['SSL_RESTAPI_CERTIFICATE', 'SSL_RESTAPI_PRIVATE_KEY'] if not set(ssl_keys) <= set(placeholders): placeholders['SSL_RESTAPI_CERTIFICATE_FILE'] = '' placeholders['SSL_RESTAPI_PRIVATE_KEY_FILE'] = '' placeholders['SSL_RESTAPI_CA_FILE'] = '' placeholders['SSL_RESTAPI_CA'] = '' elif 'SSL_RESTAPI_CA' in placeholders and placeholders['SSL_RESTAPI_CA_FILE'] == '': placeholders['SSL_RESTAPI_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'rest-api-ca.crt') return placeholders ","def get_placeholders(provider): placeholders = dict(os.environ) placeholders.setdefault('PGHOME', os.path.expanduser('~')) placeholders.setdefault('APIPORT', '8008') placeholders.setdefault('BACKUP_SCHEDULE', '0 1 * * *') placeholders.setdefault('BACKUP_NUM_TO_RETAIN', '5') placeholders.setdefault('CRONTAB', '[]') placeholders.setdefault('PGROOT', os.path.join(placeholders['PGHOME'], 'pgroot')) placeholders.setdefault('WALE_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp'))) placeholders.setdefault('PGDATA', os.path.join(placeholders['PGROOT'], 'pgdata')) placeholders.setdefault('HUMAN_ROLE', 'zalandos') placeholders.setdefault('PGUSER_STANDBY', 'standby') placeholders.setdefault('PGPASSWORD_STANDBY', 'standby') placeholders.setdefault('USE_ADMIN', 
'PGPASSWORD_ADMIN' in placeholders) placeholders.setdefault('PGUSER_ADMIN', 'admin') placeholders.setdefault('PGPASSWORD_ADMIN', 'cola') placeholders.setdefault('PGUSER_SUPERUSER', 'postgres') placeholders.setdefault('PGPASSWORD_SUPERUSER', 'zalando') placeholders.setdefault('ALLOW_NOSSL', '') placeholders.setdefault('BGMON_LISTEN_IP', '0.0.0.0') placeholders.setdefault('PGPORT', '5432') placeholders.setdefault('SCOPE', 'dummy') placeholders.setdefault('RW_DIR', RW_DIR) placeholders.setdefault('SSL_TEST_RELOAD', 'SSL_PRIVATE_KEY_FILE' in os.environ) placeholders.setdefault('SSL_CA_FILE', '') placeholders.setdefault('SSL_CRL_FILE', '') placeholders.setdefault('SSL_CERTIFICATE_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.crt')) placeholders.setdefault('SSL_PRIVATE_KEY_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.key')) placeholders.setdefault('SSL_RESTAPI_CA_FILE', '') placeholders.setdefault('SSL_RESTAPI_CERTIFICATE_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'rest-api-server.crt')) placeholders.setdefault('SSL_RESTAPI_PRIVATE_KEY_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'restapi-api-server.key')) placeholders.setdefault('WALE_BACKUP_THRESHOLD_MEGABYTES', 102400) placeholders.setdefault('WALE_BACKUP_THRESHOLD_PERCENTAGE', 30) placeholders.setdefault('INITDB_LOCALE', 'en_US') # if Kubernetes is defined as a DCS, derive the namespace from the POD_NAMESPACE, if not set explicitely. # We only do this for Kubernetes DCS, as we don't want to suddently change, i.e. DCS base path when running # in Kubernetes with Etcd in a non-default namespace placeholders.setdefault('NAMESPACE', placeholders.get('POD_NAMESPACE', 'default') if USE_KUBERNETES and placeholders.get('DCS_ENABLE_KUBERNETES_API') else '') # use namespaces to set WAL bucket prefix scope naming the folder namespace-clustername for non-default namespace. 
placeholders.setdefault('WAL_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE']) if placeholders['NAMESPACE'] not in ('default', '') else '') placeholders.setdefault('WAL_BUCKET_SCOPE_SUFFIX', '') placeholders.setdefault('WALE_ENV_DIR', os.path.join(placeholders['RW_DIR'], 'etc', 'wal-e.d', 'env')) placeholders.setdefault('USE_WALE', False) cpu_count = str(min(psutil.cpu_count(), 10)) placeholders.setdefault('WALG_DOWNLOAD_CONCURRENCY', cpu_count) placeholders.setdefault('WALG_UPLOAD_CONCURRENCY', cpu_count) placeholders.setdefault('PAM_OAUTH2', '') placeholders.setdefault('CALLBACK_SCRIPT', '') placeholders.setdefault('DCS_ENABLE_KUBERNETES_API', '') placeholders.setdefault('KUBERNETES_ROLE_LABEL', 'spilo-role') placeholders.setdefault('KUBERNETES_SCOPE_LABEL', 'version') placeholders.setdefault('KUBERNETES_LABELS', KUBERNETES_DEFAULT_LABELS) placeholders.setdefault('KUBERNETES_USE_CONFIGMAPS', '') placeholders.setdefault('KUBERNETES_BYPASS_API_SERVICE', 'true') placeholders.setdefault('USE_PAUSE_AT_RECOVERY_TARGET', False) placeholders.setdefault('CLONE_METHOD', '') placeholders.setdefault('CLONE_WITH_WALE', '') placeholders.setdefault('CLONE_WITH_BASEBACKUP', '') placeholders.setdefault('CLONE_TARGET_TIME', '') placeholders.setdefault('CLONE_TARGET_INCLUSIVE', True) placeholders.setdefault('LOG_SHIP_SCHEDULE', '1 0 * * *') placeholders.setdefault('LOG_S3_BUCKET', '') placeholders.setdefault('LOG_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp'))) placeholders.setdefault('LOG_BUCKET_SCOPE_SUFFIX', '') # see comment for wal-e bucket prefix placeholders.setdefault('LOG_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE']) if placeholders['NAMESPACE'] not in ('default', '') else '') if placeholders['CLONE_METHOD'] == 'CLONE_WITH_WALE': # modify placeholders and take care of error cases name = set_extended_wale_placeholders(placeholders, 'CLONE_') if name is False: logging.warning('Cloning with WAL-E is only possible when CLONE_WALE_*_PREFIX ' 'or CLONE_WALG_*_PREFIX or CLONE_WAL_*_BUCKET and CLONE_SCOPE are set.') elif name == 'S3': placeholders.setdefault('CLONE_USE_WALG', 'true') elif placeholders['CLONE_METHOD'] == 'CLONE_WITH_BASEBACKUP': clone_scope = placeholders.get('CLONE_SCOPE') if clone_scope and placeholders.get('CLONE_HOST') \ and placeholders.get('CLONE_USER') and placeholders.get('CLONE_PASSWORD'): placeholders['CLONE_WITH_BASEBACKUP'] = True placeholders.setdefault('CLONE_PGPASS', os.path.join(placeholders['PGHOME'], '.pgpass_{0}'.format(clone_scope))) placeholders.setdefault('CLONE_PORT', 5432) else: logging.warning(""Clone method is set to basebackup, but no 'CLONE_SCOPE' "" ""or 'CLONE_HOST' or 'CLONE_USER' or 'CLONE_PASSWORD' specified"") else: if set_extended_wale_placeholders(placeholders, 'STANDBY_') == 'S3': placeholders.setdefault('STANDBY_USE_WALG', 'true') placeholders.setdefault('STANDBY_WITH_WALE', '') placeholders.setdefault('STANDBY_HOST', '') placeholders.setdefault('STANDBY_PORT', '') placeholders.setdefault('STANDBY_CLUSTER', placeholders['STANDBY_WITH_WALE'] or placeholders['STANDBY_HOST']) if provider == PROVIDER_AWS and not USE_KUBERNETES: # AWS specific callback to tag the instances with roles placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_aws.py' if placeholders.get('EIP_ALLOCATION'): placeholders['CALLBACK_SCRIPT'] += ' ' + placeholders['EIP_ALLOCATION'] if any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE): placeholders.setdefault('USE_WALG_RESTORE', 'true') if 
placeholders.get('WALG_AZ_PREFIX'): placeholders.setdefault('USE_WALG_BACKUP', 'true') if all(placeholders.get(n) for n in WALG_SSH_NAMES): placeholders.setdefault('USE_WALG_BACKUP', 'true') set_walg_placeholders(placeholders) placeholders['USE_WALE'] = any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE + ('WAL_SWIFT_BUCKET', 'WALE_SWIFT_PREFIX', 'WAL_GCS_BUCKET', 'WAL_GS_BUCKET', 'WALE_GS_PREFIX', 'WALG_GS_PREFIX')) if placeholders.get('WALG_BACKUP_FROM_REPLICA'): placeholders['WALG_BACKUP_FROM_REPLICA'] = str(placeholders['WALG_BACKUP_FROM_REPLICA']).lower() # Kubernetes requires a callback to change the labels in order to point to the new master if USE_KUBERNETES: if not placeholders.get('DCS_ENABLE_KUBERNETES_API'): placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_role.py' placeholders.setdefault('postgresql', {}) placeholders['postgresql'].setdefault('parameters', {}) placeholders['WALE_BINARY'] = 'wal-g' if placeholders.get('USE_WALG_BACKUP') == 'true' else 'wal-e' placeholders['postgresql']['parameters']['archive_command'] = \ 'envdir ""{WALE_ENV_DIR}"" {WALE_BINARY} wal-push ""%p""'.format(**placeholders) \ if placeholders['USE_WALE'] else '/bin/true' if os.path.exists(MEMORY_LIMIT_IN_BYTES_PATH): with open(MEMORY_LIMIT_IN_BYTES_PATH) as f: os_memory_mb = int(f.read()) / 1048576 else: os_memory_mb = sys.maxsize os_memory_mb = min(os_memory_mb, os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / 1048576) # Depending on environment we take 1/4 or 1/5 of the memory, expressed in full MB's sb_ratio = 5 if USE_KUBERNETES else 4 placeholders['postgresql']['parameters']['shared_buffers'] = '{}MB'.format(int(os_memory_mb/sb_ratio)) # # 1 connection per 30 MB, at least 100, at most 1000 placeholders['postgresql']['parameters']['max_connections'] = min(max(100, int(os_memory_mb/30)), 1000) placeholders['instance_data'] = get_instance_metadata(provider) placeholders['BGMON_LISTEN_IP'] = get_listen_ip() if 'SSL_CA' in placeholders and placeholders['SSL_CA_FILE'] == '': placeholders['SSL_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'ca.crt') if 'SSL_CRL' in placeholders and placeholders['SSL_CRL_FILE'] == '': placeholders['SSL_CRL_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'server.crl') if {'SSL_RESTAPI_CERTIFICATE', 'SSL_RESTAPI_PRIVATE_KEY'} <= set(placeholders): if not placeholders['SSL_RESTAPI_CERTIFICATE_FILE']: placeholders['SSL_RESTAPI_CERTIFICATE_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'rest-api-server.crt') if not placeholders['SSL_RESTAPI_PRIVATE_KEY_FILE']: placeholders['SSL_RESTAPI_PRIVATE_KEY_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'restapi-api-server.key') if placeholders.get('SSL_RESTAPI_CA') and not placeholders['SSL_RESTAPI_CA_FILE']: placeholders['SSL_RESTAPI_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'rest-api-ca.crt') return placeholders " 57494,"def _convert_strict_union_type(type_context: AnalyzeTypeContext) -> Type: """"""Convert StrictUnion[...] type to Union[...]"""""" union_mypy_types: Tuple[Type, ...] = type_context.type.args # e.g. (int?, bool?, str?) return UnionType(items=union_mypy_types, line=type_context.type.line, column=type_context.type.column) ","def _convert_strict_union_type(type_context: AnalyzeTypeContext) -> Type: """""" Convert StrictUnion[...] type to Union[...] """""" union_mypy_types: Tuple[Type, ...] = type_context.type.args # e.g. (int?, bool?, str?) 
return UnionType(items=union_mypy_types, line=type_context.type.line, column=type_context.type.column) " 47177,"def distributed_concat(tensor: ""torch.Tensor"", num_total_examples: Optional[int] = None) -> torch.Tensor: try: if isinstance(tensor, (tuple, list)): return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor) output_tensors = [tensor.clone() for _ in range(dist.get_world_size())] dist.all_gather(output_tensors, tensor) output_tensors = [t if len(t.shape) > 0 else t.reshape((-1,)) for t in output_tensors] concat = torch.cat(output_tensors, dim=0) # truncate the dummy elements added by SequentialDistributedSampler if num_total_examples is not None: concat = concat[:num_total_examples] return concat except AssertionError: raise AssertionError(""Not currently using distributed training"") ","def distributed_concat(tensor: ""torch.Tensor"", num_total_examples: Optional[int] = None) -> torch.Tensor: try: if isinstance(tensor, (tuple, list)): return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor) output_tensors = [tensor.clone() for _ in range(dist.get_world_size())] dist.all_gather(output_tensors, tensor) output_tensors = [t if len(t.shape) > 0 else t[None] for t in output_tensors] concat = torch.cat(output_tensors, dim=0) # truncate the dummy elements added by SequentialDistributedSampler if num_total_examples is not None: concat = concat[:num_total_examples] return concat except AssertionError: raise AssertionError(""Not currently using distributed training"") " 30656,"def ip_command(): ip = demisto.args()['ip'] res = http_request('GET', f'/shodan/host/{ip}') if not res: demisto.results('No information available for that IP.') else: hostnames = res.get('hostnames') hostname = hostnames[0] if hostnames else '' # It's a list, only if it exists and not empty we take the first value location = f'{round(res.get(""latitude"", 0.0), 3)},{round(res.get(""longitude"", 0.0), 3)}' ip_details = { 'ASN': res.get('asn', ''), 'Address': ip, 'Hostname': hostname, 'Geo': { 'Country': res.get('country_name', ''), 'Location': location } } shodan_ip_details = { 'Tag': res.get('tags', []), 'Latitude': res.get('latitude', 0.0), 'Longitude': res.get('longitude', 0.0), 'Org': res.get('org', ''), 'ASN': res.get('asn', ''), 'ISP': res.get('isp', ''), 'LastUpdate': res.get('last_update', ''), 'CountryName': res.get('country_name', ''), 'Address': ip, 'OS': res.get('os', ''), 'Port': res.get('ports', []) } ec = { outputPaths['ip']: ip_details, 'Shodan': { 'IP': shodan_ip_details } } human_readable = tableToMarkdown(f'Shodan details for IP {ip}', { 'Country': ec[outputPaths['ip']]['Geo']['Country'], 'Location': ec[outputPaths['ip']]['Geo']['Location'], 'ASN': ec[outputPaths['ip']]['ASN'], 'ISP': ec['Shodan']['IP']['ISP'], 'Ports': ', '.join([str(x) for x in ec['Shodan']['IP']['Port']]), 'Hostname': ec[outputPaths['ip']]['Hostname'] }) demisto.results({ 'Type': entryTypes['note'], 'Contents': res, 'ContentsFormat': formats['json'], 'HumanReadable': human_readable, 'HumanReadableFormat': formats['markdown'], 'EntryContext': ec }) ","def ip_command(): ip = demisto.args()['ip'] res = http_request('GET', f'/shodan/host/{ip}') if not res: demisto.results('No information available for the given IP.') else: hostnames = res.get('hostnames') hostname = hostnames[0] if hostnames else '' # It's a list, only if it exists and not empty we take the first value location = f'{round(res.get(""latitude"", 0.0), 3)},{round(res.get(""longitude"", 0.0), 3)}' ip_details = { 'ASN': 
res.get('asn', ''), 'Address': ip, 'Hostname': hostname, 'Geo': { 'Country': res.get('country_name', ''), 'Location': location } } shodan_ip_details = { 'Tag': res.get('tags', []), 'Latitude': res.get('latitude', 0.0), 'Longitude': res.get('longitude', 0.0), 'Org': res.get('org', ''), 'ASN': res.get('asn', ''), 'ISP': res.get('isp', ''), 'LastUpdate': res.get('last_update', ''), 'CountryName': res.get('country_name', ''), 'Address': ip, 'OS': res.get('os', ''), 'Port': res.get('ports', []) } ec = { outputPaths['ip']: ip_details, 'Shodan': { 'IP': shodan_ip_details } } human_readable = tableToMarkdown(f'Shodan details for IP {ip}', { 'Country': ec[outputPaths['ip']]['Geo']['Country'], 'Location': ec[outputPaths['ip']]['Geo']['Location'], 'ASN': ec[outputPaths['ip']]['ASN'], 'ISP': ec['Shodan']['IP']['ISP'], 'Ports': ', '.join([str(x) for x in ec['Shodan']['IP']['Port']]), 'Hostname': ec[outputPaths['ip']]['Hostname'] }) demisto.results({ 'Type': entryTypes['note'], 'Contents': res, 'ContentsFormat': formats['json'], 'HumanReadable': human_readable, 'HumanReadableFormat': formats['markdown'], 'EntryContext': ec }) " 46291,"def is_diagonal(matrix, tol=1e-8): """"""Determine whether affine is a diagonal matrix. Parameters ---------- matrix : 2-D array The matrix to test. tol : float, optional Consider any entries with magnitude < `tol` as 0. Returns ------- is_diag : bool Boolean indicating whether affine is diagonal. """""" if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]: raise ValueError(""matrix must be square"") non_diag = matrix[~np.eye(matrix.shape[0], dtype=bool)] if tol == 0: return np.count_nonzero(non_diag) == 0 else: return np.max(np.abs(non_diag)) <= tol ","def is_diagonal(matrix, tol=1e-8): """"""Determine whether a matrix is diagonal up to some tolerance. Parameters ---------- matrix : 2-D array The matrix to test. tol : float, optional Consider any entries with magnitude < `tol` as 0. Returns ------- is_diag : bool Boolean indicating whether affine is diagonal. """""" if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]: raise ValueError(""matrix must be square"") non_diag = matrix[~np.eye(matrix.shape[0], dtype=bool)] if tol == 0: return np.count_nonzero(non_diag) == 0 else: return np.max(np.abs(non_diag)) <= tol " 36273,"def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult], n_shots: int = 500, symm_type: int = -1, noisy_program: Program = None, active_reset: bool = False, show_progress_bar: bool = False) \ -> Iterable[ExperimentResult]: """""" Calibrates the expectation and std_err of the input expt_results and updates those estimates. The input expt_results should be estimated with symmetrized readout error for this to work properly. Calibration is done by measuring expectation values of eigenstates of the observable, which ideally should yield either +/- 1 but in practice will have magnitude less than 1. For default exhaustive_symmetrization the calibration expectation magnitude averaged over all eigenvectors is recorded as calibration_expectation. The original expectation is moved to raw_expectation and replaced with the old value scaled by the inverse calibration expectation. :param qc: a quantum computer object on which to run the programs necessary to calibrate each result. :param expt_results: a list of results, each of which will be separately calibrated. 
:param n_shots: the number of shots to run for each eigenvector :param symm_type: the type of symmetrization * -1 -- exhaustive symmetrization uses every possible combination of flips; this option is the default since it ensures proper calibration, but is exponential in the weight of each observable. * 0 -- no symmetrization * 1 -- symmetrization using an OA with strength 1 * 2 -- symmetrization using an OA with strength 2 * 3 -- symmetrization using an OA with strength 3 TODO: accomodate calibration for weight > symmetrization strength (symm_type) Currently, the symmetrization type must be at least the maximum weight of any observable estimated and also match the symmetrization type used to estimate the observables of the input ExperimentResults. :param noisy_program: an optional program from which to inherit a noise model; only relevant for running on a QVM :param active_reset: whether or not to begin the program by actively resetting. If true, execution of each of the returned programs in a loop on the QPU will generally be faster. :param show_progress_bar: displays a progress bar via tqdm if true. :return: a copy of the input results with updated estimates and calibration results. """""" observables = [copy(res.setting.out_operator) for res in expt_results] observables = list(set(observables)) # get unique observables that will need to be calibrated programs = [get_calibration_program(obs, noisy_program, active_reset) for obs in observables] meas_qubits = [obs.get_qubits() for obs in observables] calibrations = {} for prog, meas_qs, obs in zip(tqdm(programs, disable=not show_progress_bar), meas_qubits, observables): results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs) # Obtain statistics from result of experiment # TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements # even though it only needs the observable. setting = ExperimentSetting(zeros_state(meas_qs), obs) obs_mean, obs_var = _stats_from_measurements(results, {q: idx for idx, q in enumerate(meas_qs)}, setting) calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results)) for expt_result in expt_results: # TODO: allow weight > symm_type if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()): warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} ' f'currently not supported since it acts on more qubits than the ' f'symm_type {symm_type}.') # get the calibration data for this observable cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()] obs_mean, obs_var, counts = cal_data # Use the calibration to correct the mean and var result_mean = expt_result.expectation result_var = expt_result.std_err ** 2 corrected_mean = result_mean / obs_mean corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var) yield ExperimentResult( setting=expt_result.setting, expectation=corrected_mean, std_err=np.sqrt(corrected_var), total_counts=expt_result.total_counts, raw_expectation=result_mean, raw_std_err=expt_result.std_err, calibration_expectation=obs_mean, calibration_std_err=np.sqrt(obs_var), calibration_counts=counts ) ","def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult], n_shots: int = 500, symm_type: int = -1, noisy_program: Program = None, active_reset: bool = False, show_progress_bar: bool = False) \ -> Iterable[ExperimentResult]: """""" Calibrates the expectation and std_err of the input expt_results and updates those estimates. 
The input expt_results should be estimated with symmetrized readout error for this to work properly. Calibration is done by measuring expectation values of eigenstates of the observable, which ideally should yield either +/- 1 but in practice will have magnitude less than 1. For default exhaustive_symmetrization the calibration expectation magnitude averaged over all eigenvectors is recorded as calibration_expectation. The original expectation is moved to raw_expectation and replaced with the old value scaled by the inverse calibration expectation. :param qc: a quantum computer object on which to run the programs necessary to calibrate each result. :param expt_results: a list of results, each of which will be separately calibrated. :param n_shots: the number of shots to run for each eigenvector :param symm_type: the type of symmetrization * -1 -- exhaustive symmetrization uses every possible combination of flips; this option is the default since it ensures proper calibration, but is exponential in the weight of each observable. * 0 -- no symmetrization * 1 -- symmetrization using an OA with strength 1 * 2 -- symmetrization using an OA with strength 2 * 3 -- symmetrization using an OA with strength 3 TODO: accomodate calibration for weight > symmetrization strength (symm_type) Currently, the symmetrization type must be at least the maximum weight of any observable estimated and also match the symmetrization type used to estimate the observables of the input ExperimentResults. :param noisy_program: an optional program from which to inherit a noise model; only relevant for running on a QVM :param active_reset: whether or not to begin the program by actively resetting. If true, execution of each of the returned programs in a loop on the QPU will generally be faster. :param show_progress_bar: displays a progress bar via tqdm if true. :return: a copy of the input results with updated estimates and calibration results. """""" observables = [copy(res.setting.out_operator) for res in expt_results] observables = list(observables) # get unique observables that will need to be calibrated programs = [get_calibration_program(obs, noisy_program, active_reset) for obs in observables] meas_qubits = [obs.get_qubits() for obs in observables] calibrations = {} for prog, meas_qs, obs in zip(tqdm(programs, disable=not show_progress_bar), meas_qubits, observables): results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs) # Obtain statistics from result of experiment # TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements # even though it only needs the observable. 
setting = ExperimentSetting(zeros_state(meas_qs), obs) obs_mean, obs_var = _stats_from_measurements(results, {q: idx for idx, q in enumerate(meas_qs)}, setting) calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results)) for expt_result in expt_results: # TODO: allow weight > symm_type if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()): warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} ' f'currently not supported since it acts on more qubits than the ' f'symm_type {symm_type}.') # get the calibration data for this observable cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()] obs_mean, obs_var, counts = cal_data # Use the calibration to correct the mean and var result_mean = expt_result.expectation result_var = expt_result.std_err ** 2 corrected_mean = result_mean / obs_mean corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var) yield ExperimentResult( setting=expt_result.setting, expectation=corrected_mean, std_err=np.sqrt(corrected_var), total_counts=expt_result.total_counts, raw_expectation=result_mean, raw_std_err=expt_result.std_err, calibration_expectation=obs_mean, calibration_std_err=np.sqrt(obs_var), calibration_counts=counts ) " 58156,"def get_events(aws_client: boto3.client, collect_from: dict, collect_from_default: Optional[datetime], last_ids: dict, severity: str, limit: int = MAX_RESULTS, detectors_num: int = MAX_RESULTS, max_ids_per_req: int = MAX_IDS_PER_REQ) -> Tuple[list, dict, dict]: """"""Get events from AWSGuardDuty. Args: aws_client: AWSClient session to get events from. collect_from: Dict of {detector_id: datestring to start collecting from}, used when fetching. collect_from_default: datetime to start collecting from if detector id is not found in collect_from keys. last_ids: Dict of {detector_id: last fetched id}, used to avoid duplicates. severity: The minimum severity to start fetching from. (inclusive) limit: The maximum number of events to fetch. detectors_num: The maximum number of detectors to fetch. max_ids_per_req: The maximum number of findings to get per API request. Returns: (events, new_last_ids, new_collect_from) events (list): The events fetched. new_last_ids (dict): The new last_ids dict, expected to receive as last_ids input in the next run. new_collect_from (dict): The new collect_from dict, expected to receive as collect_from input in the next run. """""" events: list = [] detector_ids: list = [] next_token = 'starting_token' new_last_ids = last_ids.copy() new_collect_from = collect_from.copy() demisto.info(f""AWSGuardDutyEventCollector Starting get_events. collect_from is {collect_from}, "" f""collect_from_default is {collect_from_default}, last_ids are {last_ids}"") # List all detectors while next_token: list_detectors_args: dict = {'MaxResults': detectors_num} if next_token != 'starting_token': list_detectors_args.update({'NextToken': next_token}) response = aws_client.list_detectors(**list_detectors_args) detector_ids += response.get('DetectorIds', []) next_token = response.get('NextToken') demisto.info(f""AWSGuardDutyEventCollector - Found detector ids: {detector_ids}"") for detector_id in detector_ids: demisto.info(f""AWSGuardDutyEventCollector - Getting finding ids for detector id {detector_id}. 
"" f""Collecting from {collect_from.get(detector_id, collect_from_default)}"") next_token = 'starting_token' finding_ids: list = [] detector_events: list = [] updated_at = parse_date_string(collect_from.get(detector_id)) if collect_from.get( detector_id) else collect_from_default # List all finding ids while next_token and len(events) + len(finding_ids) < limit: demisto.info(f""AWSGuardDutyEventCollector - Getting more finding ids with next_token: {next_token} "" f""and updated_at {updated_at}"") list_finding_args = { 'DetectorId': detector_id, 'FindingCriteria': { 'Criterion': { 'updatedAt': {'Gte': date_to_timestamp(updated_at)}, 'severity': {'Gte': GD_SEVERITY_DICT.get(severity, 1)} } }, 'SortCriteria': { 'AttributeName': 'updatedAt', 'OrderBy': 'ASC' }, 'MaxResults': min(limit - (len(events) + len(set(finding_ids))), MAX_RESULTS) } if next_token != 'starting_token': list_finding_args.update({'NextToken': next_token}) list_findings = aws_client.list_findings(**list_finding_args) finding_ids += list_findings.get('FindingIds', []) next_token = list_findings.get('NextToken') # Handle duplicates and findings updated at the same time. if last_ids.get(detector_id) and last_ids.get(detector_id) in finding_ids: demisto.info(f""AWSGuardDutyEventCollector - Cutting finding_ids {finding_ids} "" f""for detector {detector_id} and last_id {last_ids.get(detector_id)}."") finding_ids = finding_ids[finding_ids.index(last_ids.get(detector_id)) + 1:] demisto.info( f""AWSGuardDutyEventCollector - New finding_ids {finding_ids} after cut "" f""for detector {detector_id} and last_id {last_ids.get(detector_id)}."") # Handle duplicates in response while preserving order finding_ids_unique = list(dict.fromkeys(finding_ids)) demisto.info(f""Detector id {detector_id} unique finding ids found: {finding_ids_unique}"") # Get all relevant findings chunked_finding_ids = [finding_ids_unique[i: i + max_ids_per_req] for i in range(0, len(finding_ids_unique), max_ids_per_req)] for chunk_of_finding_ids in chunked_finding_ids: demisto.info(f""Getting chunk of finding ids {chunk_of_finding_ids}"") findings_response = aws_client.get_findings(DetectorId=detector_id, FindingIds=chunk_of_finding_ids) detector_events += findings_response.get('Findings', []) demisto.info(f""AWSGuardDutyEventCollector - Detector id {detector_id} "" f""findings found ({len(detector_events)}): {detector_events}"") events += detector_events demisto.info(f""AWSGuardDutyEventCollector - Number of events is {len(events)}"") if finding_ids: new_last_ids[detector_id] = finding_ids[-1] if detector_events: new_collect_from[detector_id] = detector_events[-1].get('UpdatedAt', detector_events[-1].get('CreatedAt')) demisto.info(f""AWSGuardDutyEventCollector - Total number of events is {len(events)}"") events = convert_events_with_datetime_to_str(events) return events, new_last_ids, new_collect_from ","def get_events(aws_client: boto3.client, collect_from: dict, collect_from_default: Optional[datetime], last_ids: dict, severity: str, limit: int = MAX_RESULTS, detectors_num: int = MAX_RESULTS, max_ids_per_req: int = MAX_IDS_PER_REQ) -> Tuple[list, dict, dict]: """"""Get events from AWSGuardDuty. Args: aws_client: AWSClient session to get events from. collect_from: Dict of {detector_id: datestring to start collecting from}, used when fetching. collect_from_default: datetime to start collecting from if detector id is not found in collect_from keys. last_ids: Dict of {detector_id: last fetched id}, used to avoid duplicates. 
severity: The minimum severity to start fetching from. (inclusive) limit: The maximum number of events to fetch. detectors_num: The maximum number of detectors to fetch. max_ids_per_req: The maximum number of findings to get per API request. Returns: (events, new_last_ids, new_collect_from) events (list): The events fetched. new_last_ids (dict): The new last_ids dict, expected to receive as last_ids input in the next run. new_collect_from (dict): The new collect_from dict, expected to receive as collect_from input in the next run. """""" events: list = [] detector_ids: list = [] next_token = 'starting_token' new_last_ids = last_ids.copy() new_collect_from = collect_from.copy() demisto.debug(f""AWSGuardDutyEventCollector Starting get_events. {collect_from=}, "" f""{collect_from_default=}, {last_ids=}"") # List all detectors while next_token: list_detectors_args: dict = {'MaxResults': detectors_num} if next_token != 'starting_token': list_detectors_args.update({'NextToken': next_token}) response = aws_client.list_detectors(**list_detectors_args) detector_ids += response.get('DetectorIds', []) next_token = response.get('NextToken') demisto.info(f""AWSGuardDutyEventCollector - Found detector ids: {detector_ids}"") for detector_id in detector_ids: demisto.info(f""AWSGuardDutyEventCollector - Getting finding ids for detector id {detector_id}. "" f""Collecting from {collect_from.get(detector_id, collect_from_default)}"") next_token = 'starting_token' finding_ids: list = [] detector_events: list = [] updated_at = parse_date_string(collect_from.get(detector_id)) if collect_from.get( detector_id) else collect_from_default # List all finding ids while next_token and len(events) + len(finding_ids) < limit: demisto.info(f""AWSGuardDutyEventCollector - Getting more finding ids with next_token: {next_token} "" f""and updated_at {updated_at}"") list_finding_args = { 'DetectorId': detector_id, 'FindingCriteria': { 'Criterion': { 'updatedAt': {'Gte': date_to_timestamp(updated_at)}, 'severity': {'Gte': GD_SEVERITY_DICT.get(severity, 1)} } }, 'SortCriteria': { 'AttributeName': 'updatedAt', 'OrderBy': 'ASC' }, 'MaxResults': min(limit - (len(events) + len(set(finding_ids))), MAX_RESULTS) } if next_token != 'starting_token': list_finding_args.update({'NextToken': next_token}) list_findings = aws_client.list_findings(**list_finding_args) finding_ids += list_findings.get('FindingIds', []) next_token = list_findings.get('NextToken') # Handle duplicates and findings updated at the same time. 
if last_ids.get(detector_id) and last_ids.get(detector_id) in finding_ids: demisto.info(f""AWSGuardDutyEventCollector - Cutting finding_ids {finding_ids} "" f""for detector {detector_id} and last_id {last_ids.get(detector_id)}."") finding_ids = finding_ids[finding_ids.index(last_ids.get(detector_id)) + 1:] demisto.info( f""AWSGuardDutyEventCollector - New finding_ids {finding_ids} after cut "" f""for detector {detector_id} and last_id {last_ids.get(detector_id)}."") # Handle duplicates in response while preserving order finding_ids_unique = list(dict.fromkeys(finding_ids)) demisto.info(f""Detector id {detector_id} unique finding ids found: {finding_ids_unique}"") # Get all relevant findings chunked_finding_ids = [finding_ids_unique[i: i + max_ids_per_req] for i in range(0, len(finding_ids_unique), max_ids_per_req)] for chunk_of_finding_ids in chunked_finding_ids: demisto.info(f""Getting chunk of finding ids {chunk_of_finding_ids}"") findings_response = aws_client.get_findings(DetectorId=detector_id, FindingIds=chunk_of_finding_ids) detector_events += findings_response.get('Findings', []) demisto.info(f""AWSGuardDutyEventCollector - Detector id {detector_id} "" f""findings found ({len(detector_events)}): {detector_events}"") events += detector_events demisto.info(f""AWSGuardDutyEventCollector - Number of events is {len(events)}"") if finding_ids: new_last_ids[detector_id] = finding_ids[-1] if detector_events: new_collect_from[detector_id] = detector_events[-1].get('UpdatedAt', detector_events[-1].get('CreatedAt')) demisto.info(f""AWSGuardDutyEventCollector - Total number of events is {len(events)}"") events = convert_events_with_datetime_to_str(events) return events, new_last_ids, new_collect_from " 55611,"def needle_plot_test_props_callback( nclicks, p_name, p_value, prop_type=None, ): """"""Callback on a single user chosen prop on NeedlePlot component. :param nclicks: (string) html.Button 'n_clicks' Input :param p_name: (string) dcc.Input 'value' State (not used here) :param p_value: (string) dcc.Input 'value' State :param prop_type: (string) one of PARAM_TYPES keys default: None :return: the value of the prop to the dash.dependencies.Ouput() """""" answer = None if prop_type == 'dict': answer = {} # avoid triggering at the creation of the button in the layout if nclicks is not None: # convert the parameter value to the right type if prop_type in PROP_TYPES: p_value = PROP_TYPES[prop_type](p_value) answer = p_value return answer ","def needle_plot_test_props_callback( nclicks, p_name, p_value, prop_type=None, ): """"""Callback on a single user chosen prop on NeedlePlot component. :param nclicks: (string) html.Button 'n_clicks' Input :param p_name: (string) dcc.Input 'value' State (not used here) :param p_value: (string) dcc.Input 'value' State :param prop_type: (string) one of PARAM_TYPES keys default: None :return: the value of the prop to the dash.dependencies.Ouput() """""" if prop_type == 'dict': return {} elif n_clicks is not None: if prop_type in PROP_TYPES: return PROP_TYPES[prop_type](p_value) return None if prop_type == 'dict': answer = {} # avoid triggering at the creation of the button in the layout if nclicks is not None: # convert the parameter value to the right type if prop_type in PROP_TYPES: p_value = PROP_TYPES[prop_type](p_value) answer = p_value return answer " 7673,"def generate_basic_component(entity, uid=None, url=None, title=None, description=None): """"""Generate an iCalendar component with basic common properties. 
:param entity: Event/session/contribution where properties come from :param uid: UID for the component :param url: URL for the component (defaults to `entity.external_url`) :param title: A title for the component :param description: A text based description for the component :return: iCalendar event with basic properties """""" component = icalendar.Event() component.add('dtstamp', now_utc(False)) component.add('dtstart', entity.start_dt) component.add('dtend', entity.end_dt) component.add('summary', title or entity.title) if uid: component.add('uid', uid) if not url and hasattr(entity, 'external_url'): url = entity.external_url if url: component.add('url', url) location = (f'{entity.room_name} ({entity.venue_name})' if entity.venue_name and entity.room_name else (entity.venue_name or entity.room_name)) if location: component.add('location', location) speaker_list = getattr(entity, 'person_links', []) cal_description = [] if speaker_list: speakers = [f'{x.full_name} ({x.affiliation})' if x.affiliation else x.full_name for x in speaker_list] cal_description.append('Speakers: {}'.format(', '.join(speakers))) if description := description or getattr(entity, 'description', None): desc_text = str(description) or '

' # get rid of RichMarkup try: cal_description.append(str(html.fromstring(desc_text).text_content())) except ParserError: # this happens if desc_text only contains a html comment pass if url: cal_description.append(url) if cal_description: component.add('description', '\n'.join(cal_description)) return component ","def generate_basic_component(entity, uid=None, url=None, title=None, description=None): """"""Generate an iCalendar component with basic common properties. :param entity: Event/session/contribution where properties come from :param uid: UID for the component :param url: URL for the component (defaults to `entity.external_url`) :param title: A title for the component :param description: A text based description for the component :return: iCalendar event with basic properties """""" component = icalendar.Event() component.add('dtstamp', now_utc(False)) component.add('dtstart', entity.start_dt) component.add('dtend', entity.end_dt) component.add('summary', title or entity.title) if uid: component.add('uid', uid) if not url and hasattr(entity, 'external_url'): url = entity.external_url if url: component.add('url', url) location = (f'{entity.room_name} ({entity.venue_name})' if entity.venue_name and entity.room_name else (entity.venue_name or entity.room_name)) if location: component.add('location', location) speaker_list = getattr(entity, 'person_links', []) cal_description = [] if speaker_list: speakers = [f'{x.full_name} ({x.affiliation})' if x.affiliation else x.full_name for x in speaker_list] cal_description.append('Speakers: {}'.format(', '.join(speakers))) if description := (description or getattr(entity, 'description', None)): desc_text = str(description) or '

' # get rid of RichMarkup try: cal_description.append(str(html.fromstring(desc_text).text_content())) except ParserError: # this happens if desc_text only contains a html comment pass if url: cal_description.append(url) if cal_description: component.add('description', '\n'.join(cal_description)) return component " 41516,"def hypotest( poi_test, data, pdf, init_pars=None, par_bounds=None, fixed_params=None, calctype=""asymptotics"", return_tail_probs=False, return_expected=False, return_expected_set=False, **kwargs, ): r"""""" Compute :math:`p`-values and test statistics for a single value of the parameter of interest. See :py:class:`~pyhf.infer.calculators.AsymptoticCalculator` and :py:class:`~pyhf.infer.calculators.ToyCalculator` on additional keyword arguments to be specified. Example: >>> import pyhf >>> pyhf.set_backend(""numpy"") >>> model = pyhf.simplemodels.hepdata_like( ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] ... ) >>> observations = [51, 48] >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata) >>> mu_test = 1.0 >>> CLs_obs, CLs_exp_band = pyhf.infer.hypotest( ... mu_test, data, model, return_expected_set=True, test_stat=""qtilde"" ... ) >>> CLs_obs array(0.05251497) >>> CLs_exp_band [array(0.00260626), array(0.01382005), array(0.06445321), array(0.23525644), array(0.57303621)] Args: poi_test (Number or Tensor): The value of the parameter of interest (POI) data (Number or Tensor): The data considered pdf (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json`` init_pars (:obj:`tensor` of :obj:`float`): The starting values of the model parameters for minimization. par_bounds (:obj:`tensor` of shape Nx2): The extrema of values the model parameters are allowed to reach in the fit. fixed_params (:obj:`tensor` of :obj:`bool`): The flag to set a parameter constant to its starting value during minimization. calctype (:obj:`str`): The calculator to create. Choose either 'asymptotics' (default) or 'toybased'. return_tail_probs (:obj:`bool`): Bool for returning :math:`\mathrm{CL}_{s+b}` and :math:`\mathrm{CL}_{b}` return_expected (:obj:`bool`): Bool for returning :math:`\mathrm{CL}_{\mathrm{exp}}` return_expected_set (:obj:`bool`): Bool for returning the :math:`(-2,-1,0,1,2)\sigma` :math:`\mathrm{CL}_{\mathrm{exp}}` --- the ""Brazil band"" Returns: Tuple of Floats and lists of Floats: - :math:`\mathrm{CL}_{s}`: The modified :math:`p`-value compared to the given threshold :math:`\alpha`, typically taken to be :math:`0.05`, defined in :xref:`arXiv:1007.1727` as .. math:: \mathrm{CL}_{s} = \frac{\mathrm{CL}_{s+b}}{\mathrm{CL}_{b}} = \frac{p_{s+b}}{1-p_{b}} to protect against excluding signal models in which there is little sensitivity. In the case that :math:`\mathrm{CL}_{s} \leq \alpha` the given signal model is excluded. - :math:`\left[\mathrm{CL}_{s+b}, \mathrm{CL}_{b}\right]`: The signal + background model hypothesis :math:`p`-value .. math:: \mathrm{CL}_{s+b} = p_{s+b} = p\left(q \geq q_{\mathrm{obs}}\middle|s+b\right) = \int\limits_{q_{\mathrm{obs}}}^{\infty} f\left(q\,\middle|s+b\right)\,dq = 1 - F\left(q_{\mathrm{obs}}(\mu)\,\middle|\mu'\right) and 1 minus the background only model hypothesis :math:`p`-value .. 
math:: \mathrm{CL}_{b} = 1- p_{b} = p\left(q \geq q_{\mathrm{obs}}\middle|b\right) = 1 - \int\limits_{-\infty}^{q_{\mathrm{obs}}} f\left(q\,\middle|b\right)\,dq = 1 - F\left(q_{\mathrm{obs}}(\mu)\,\middle|0\right) for signal strength :math:`\mu` and model hypothesis signal strength :math:`\mu'`, where the cumulative density functions :math:`F\left(q(\mu)\,\middle|\mu'\right)` are given by Equations (57) and (65) of :xref:`arXiv:1007.1727` for upper-limit-like test statistic :math:`q \in \{q_{\mu}, \tilde{q}_{\mu}\}`. Only returned when ``return_tail_probs`` is ``True``. .. note:: The definitions of the :math:`\mathrm{CL}_{s+b}` and :math:`\mathrm{CL}_{b}` used are based on profile likelihood ratio test statistics. This procedure is common in the LHC-era, but differs from procedures used in the LEP and Tevatron eras, as briefly discussed in :math:`\S` 3.8 of :xref:`arXiv:1007.1727`. - :math:`\mathrm{CL}_{s,\mathrm{exp}}`: The expected :math:`\mathrm{CL}_{s}` value corresponding to the test statistic under the background only hypothesis :math:`\left(\mu=0\right)`. Only returned when ``return_expected`` is ``True``. - :math:`\mathrm{CL}_{s,\mathrm{exp}}` band: The set of expected :math:`\mathrm{CL}_{s}` values corresponding to the median significance of variations of the signal strength from the background only hypothesis :math:`\left(\mu=0\right)` at :math:`(-2,-1,0,1,2)\sigma`. That is, the :math:`p`-values that satisfy Equation (89) of :xref:`arXiv:1007.1727` .. math:: \mathrm{band}_{N\sigma} = \mu' + \sigma\,\Phi^{-1}\left(1-\alpha\right) \pm N\sigma for :math:`\mu'=0` and :math:`N \in \left\{-2, -1, 0, 1, 2\right\}`. These values define the boundaries of an uncertainty band sometimes referred to as the ""Brazil band"". Only returned when ``return_expected_set`` is ``True``. """""" init_pars = init_pars or pdf.config.suggested_init() par_bounds = par_bounds or pdf.config.suggested_bounds() fixed_params = fixed_params or pdf.config.suggested_fixed() calc = utils.create_calculator( calctype, data, pdf, init_pars, par_bounds, fixed_params, **kwargs, ) teststat = calc.teststatistic(poi_test) sig_plus_bkg_distribution, bkg_only_distribution = calc.distributions(poi_test) tb, _ = get_backend() CLsb_obs, CLb_obs, CLs_obs = tuple( tb.astensor(pvalue) for pvalue in calc.pvalues( teststat, sig_plus_bkg_distribution, bkg_only_distribution ) ) CLsb_exp, CLb_exp, CLs_exp = calc.expected_pvalues( sig_plus_bkg_distribution, bkg_only_distribution ) is_q0 = kwargs.get('test_stat', 'qtilde') == 'q0' _returns = [CLsb_obs if is_q0 else CLs_obs] if return_tail_probs: if is_q0: _returns.append([CLb_obs]) else: _returns.append([CLsb_obs, CLb_obs]) pvalues_exp_band = [ tb.astensor(pvalue) for pvalue in (CLsb_exp if is_q0 else CLs_exp) ] if return_expected_set: if return_expected: _returns.append(tb.astensor(pvalues_exp_band[2])) _returns.append(pvalues_exp_band) elif return_expected: _returns.append(tb.astensor(pvalues_exp_band[2])) # Enforce a consistent return type of the observed CLs return tuple(_returns) if len(_returns) > 1 else _returns[0] ","def hypotest( poi_test, data, pdf, init_pars=None, par_bounds=None, fixed_params=None, calctype=""asymptotics"", return_tail_probs=False, return_expected=False, return_expected_set=False, **kwargs, ): r"""""" Compute :math:`p`-values and test statistics for a single value of the parameter of interest. See :py:class:`~pyhf.infer.calculators.AsymptoticCalculator` and :py:class:`~pyhf.infer.calculators.ToyCalculator` on additional keyword arguments to be specified. 
Example: >>> import pyhf >>> pyhf.set_backend(""numpy"") >>> model = pyhf.simplemodels.hepdata_like( ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] ... ) >>> observations = [51, 48] >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata) >>> mu_test = 1.0 >>> CLs_obs, CLs_exp_band = pyhf.infer.hypotest( ... mu_test, data, model, return_expected_set=True, test_stat=""qtilde"" ... ) >>> CLs_obs array(0.05251497) >>> CLs_exp_band [array(0.00260626), array(0.01382005), array(0.06445321), array(0.23525644), array(0.57303621)] Args: poi_test (Number or Tensor): The value of the parameter of interest (POI) data (Number or Tensor): The data considered pdf (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json`` init_pars (:obj:`tensor` of :obj:`float`): The starting values of the model parameters for minimization. par_bounds (:obj:`tensor` of shape Nx2): The extrema of values the model parameters are allowed to reach in the fit. fixed_params (:obj:`tensor` of :obj:`bool`): The flag to set a parameter constant to its starting value during minimization. calctype (:obj:`str`): The calculator to create. Choose either 'asymptotics' (default) or 'toybased'. return_tail_probs (:obj:`bool`): Bool for returning :math:`\mathrm{CL}_{s+b}` and :math:`\mathrm{CL}_{b}` return_expected (:obj:`bool`): Bool for returning :math:`\mathrm{CL}_{\mathrm{exp}}` return_expected_set (:obj:`bool`): Bool for returning the :math:`(-2,-1,0,1,2)\sigma` :math:`\mathrm{CL}_{\mathrm{exp}}` --- the ""Brazil band"" Returns: Tuple of Floats and lists of Floats: - :math:`\mathrm{CL}_{s}`: The modified :math:`p`-value compared to the given threshold :math:`\alpha`, typically taken to be :math:`0.05`, defined in :xref:`arXiv:1007.1727` as .. math:: \mathrm{CL}_{s} = \frac{\mathrm{CL}_{s+b}}{\mathrm{CL}_{b}} = \frac{p_{s+b}}{1-p_{b}} to protect against excluding signal models in which there is little sensitivity. In the case that :math:`\mathrm{CL}_{s} \leq \alpha` the given signal model is excluded. - :math:`\left[\mathrm{CL}_{s+b}, \mathrm{CL}_{b}\right]`: The signal + background model hypothesis :math:`p`-value .. math:: \mathrm{CL}_{s+b} = p_{s+b} = p\left(q \geq q_{\mathrm{obs}}\middle|s+b\right) = \int\limits_{q_{\mathrm{obs}}}^{\infty} f\left(q\,\middle|s+b\right)\,dq = 1 - F\left(q_{\mathrm{obs}}(\mu)\,\middle|\mu'\right) and 1 minus the background only model hypothesis :math:`p`-value .. math:: \mathrm{CL}_{b} = 1- p_{b} = p\left(q \geq q_{\mathrm{obs}}\middle|b\right) = 1 - \int\limits_{-\infty}^{q_{\mathrm{obs}}} f\left(q\,\middle|b\right)\,dq = 1 - F\left(q_{\mathrm{obs}}(\mu)\,\middle|0\right) for signal strength :math:`\mu` and model hypothesis signal strength :math:`\mu'`, where the cumulative density functions :math:`F\left(q(\mu)\,\middle|\mu'\right)` are given by Equations (57) and (65) of :xref:`arXiv:1007.1727` for upper-limit-like test statistic :math:`q \in \{q_{\mu}, \tilde{q}_{\mu}\}`. Only returned when ``return_tail_probs`` is ``True``. .. note:: The definitions of the :math:`\mathrm{CL}_{s+b}` and :math:`\mathrm{CL}_{b}` used are based on profile likelihood ratio test statistics. This procedure is common in the LHC-era, but differs from procedures used in the LEP and Tevatron eras, as briefly discussed in :math:`\S` 3.8 of :xref:`arXiv:1007.1727`. - :math:`\mathrm{CL}_{s,\mathrm{exp}}`: The expected :math:`\mathrm{CL}_{s}` value corresponding to the test statistic under the background only hypothesis :math:`\left(\mu=0\right)`. 
Only returned when ``return_expected`` is ``True``. - :math:`\mathrm{CL}_{s,\mathrm{exp}}` band: The set of expected :math:`\mathrm{CL}_{s}` values corresponding to the median significance of variations of the signal strength from the background only hypothesis :math:`\left(\mu=0\right)` at :math:`(-2,-1,0,1,2)\sigma`. That is, the :math:`p`-values that satisfy Equation (89) of :xref:`arXiv:1007.1727` .. math:: \mathrm{band}_{N\sigma} = \mu' + \sigma\,\Phi^{-1}\left(1-\alpha\right) \pm N\sigma for :math:`\mu'=0` and :math:`N \in \left\{-2, -1, 0, 1, 2\right\}`. These values define the boundaries of an uncertainty band sometimes referred to as the ""Brazil band"". Only returned when ``return_expected_set`` is ``True``. """""" init_pars = init_pars or pdf.config.suggested_init() par_bounds = par_bounds or pdf.config.suggested_bounds() fixed_params = fixed_params or pdf.config.suggested_fixed() calc = utils.create_calculator( calctype, data, pdf, init_pars, par_bounds, fixed_params, **kwargs, ) teststat = calc.teststatistic(poi_test) sig_plus_bkg_distribution, bkg_only_distribution = calc.distributions(poi_test) tb, _ = get_backend() CLsb_obs, CLb_obs, CLs_obs = tuple( tb.astensor(pvalue) for pvalue in calc.pvalues( teststat, sig_plus_bkg_distribution, bkg_only_distribution ) ) CLsb_exp, CLb_exp, CLs_exp = calc.expected_pvalues( sig_plus_bkg_distribution, bkg_only_distribution ) is_q0 = kwargs.get('test_stat', 'qtilde') == 'q0' _returns = [CLsb_obs if is_q0 else CLs_obs] if return_tail_probs: if is_q0: _returns.append([CLb_obs]) else: _returns.append([CLsb_obs, CLb_obs]) pvalues_exp_band = [ tb.astensor(pvalue) for pvalue in (CLsb_exp if is_q0 else CLs_exp) ] if return_expected_set: if return_expected: _returns.append(tb.astensor(pvalues_exp_band[2])) _returns.append(pvalues_exp_band) elif return_expected: _returns.append(tb.astensor(pvalues_exp_band[2])) # Enforce a consistent return type of the observed CLs return tuple(_returns) if len(_returns) > 1 else _returns[0] " 42936,"def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> None: """""" Creates a plot.ly plot of the input graph. The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The function can plot just the input graph or the graph with a specified subgraph highlighted. The function uses the standard colour theme of green nodes, grey edges, and red highlighted subgraph. 
**Example usage**: >>> graph = nx.complete_graph(10) >>> fig = plot(graph, [0, 1, 2, 3]) >>> fig.show() Args: graph (nx.Graph): input graph subgraph (list): list of nodes comprising the subgraph to highlight size (dict): size of the plot Returns: plot.ly graph object """""" s = graph.subgraph(subgraph) l = nx.kamada_kawai_layout(graph) g_nodes = go.Scatter( **_node_coords(graph, l), mode='markers', hoverinfo='text', marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2) ) g_edges = go.Scatter( **edge_coords(graph, l), line=dict(width=1, color=graph_edge_colour), hoverinfo='none', mode='lines' ) g_nodes.text = [str(i) for i in graph.nodes()] layout = go.Layout(showlegend=False, hovermode='closest', xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False), margin=dict(b=0, l=0, r=0, t=25), height=size, width=size, plot_bgcolor='#ffffff' ) if subgraph: s_edges = go.Scatter( **edge_coords(s, l), line=dict(width=2, color=subgraph_edge_colour), hoverinfo='none', mode='lines' ) s_nodes = go.Scatter( **_node_coords(s, l), mode='markers', hoverinfo='text', marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2) ) s_nodes.text = [str(i) for i in s.nodes()] f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout) else: f = go.Figure(data=[g_edges, g_nodes], layout=layout) return f ","def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> None: """""" Creates a plot.ly plot of the input graph. The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The function can plot just the input graph or the graph with a specified subgraph highlighted. The function uses the standard colour theme of green nodes, grey edges, and red highlighted subgraph. 
**Example usage**: >>> graph = nx.complete_graph(10) >>> fig = plot(graph, [0, 1, 2, 3]) >>> fig.show() Args: graph (nx.Graph): input graph subgraph (list): list of nodes comprising the subgraph to highlight size (dict): size of the plot Returns: Figure: Plotly figure with graph and subgraph plotted """""" s = graph.subgraph(subgraph) l = nx.kamada_kawai_layout(graph) g_nodes = go.Scatter( **_node_coords(graph, l), mode='markers', hoverinfo='text', marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2) ) g_edges = go.Scatter( **edge_coords(graph, l), line=dict(width=1, color=graph_edge_colour), hoverinfo='none', mode='lines' ) g_nodes.text = [str(i) for i in graph.nodes()] layout = go.Layout(showlegend=False, hovermode='closest', xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False), margin=dict(b=0, l=0, r=0, t=25), height=size, width=size, plot_bgcolor='#ffffff' ) if subgraph: s_edges = go.Scatter( **edge_coords(s, l), line=dict(width=2, color=subgraph_edge_colour), hoverinfo='none', mode='lines' ) s_nodes = go.Scatter( **_node_coords(s, l), mode='markers', hoverinfo='text', marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2) ) s_nodes.text = [str(i) for i in s.nodes()] f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout) else: f = go.Figure(data=[g_edges, g_nodes], layout=layout) return f " 41219,"def known_two_q_operations_to_sycamore_operations( qubit_a: cirq.Qid, qubit_b: cirq.Qid, op: cirq.Operation, tabulation: Optional[GateTabulation] = None, ) -> cirq.OP_TREE: """"""Synthesize a known gate operation to a sycamore operation This function dispatches based on gate type Args: qubit_a: first qubit of GateOperation qubit_b: second qubit of GateOperation op: operation to decompose tabulation: A tabulation for the Sycamore gate to use for decomposing gates. 
Returns: New operations iterable object """""" gate = op.gate if isinstance(gate, cirq.PhasedISwapPowGate): if math.isclose(gate.exponent, 1): return decompose_phased_iswap_into_syc(gate.phase_exponent, qubit_a, qubit_b) elif math.isclose(gate.phase_exponent, 0.25): return decompose_phased_iswap_into_syc_precomputed( gate.exponent * np.pi / 2, qubit_a, qubit_b ) else: raise ValueError( ""To decompose PhasedISwapPowGate, it must have a phase_exponent"" "" of .25 OR an exponent of 1.0, but got: {!r}"".format(op) ) if isinstance(gate, cirq.CNotPowGate): return [ cirq.Y(qubit_b) ** -0.5, cphase(gate.exponent * np.pi, qubit_a, qubit_b), cirq.Y(qubit_b) ** 0.5, ] elif isinstance(gate, cirq.CZPowGate): if math.isclose(gate.exponent, 1): # check if CZ or CPHASE return decompose_cz_into_syc(qubit_a, qubit_b) else: # because CZPowGate == diag([1, 1, 1, e^{i pi phi}]) return cphase(gate.exponent * np.pi, qubit_a, qubit_b) elif isinstance(gate, cirq.SwapPowGate) and math.isclose(gate.exponent, 1): return decompose_swap_into_syc(qubit_a, qubit_b) elif isinstance(gate, cirq.ISwapPowGate) and math.isclose(gate.exponent, 1): return decompose_iswap_into_syc(qubit_a, qubit_b) elif isinstance(gate, cirq.ZZPowGate): return rzz(gate.exponent * np.pi / 2, *op.qubits) elif cirq.unitary(gate, None) is not None: if tabulation: return decompose_arbitrary_into_syc_tabulation(qubit_a, qubit_b, op, tabulation) else: return decompose_arbitrary_into_syc_analytic(qubit_a, qubit_b, op) else: raise ValueError(f""Unrecognized gate: {op!r}"") ","def known_two_q_operations_to_sycamore_operations( qubit_a: cirq.Qid, qubit_b: cirq.Qid, op: cirq.Operation, tabulation: Optional[GateTabulation] = None, ) -> cirq.OP_TREE: """"""Synthesizes a known gate operation to a sycamore operation. This function dispatches based on gate type Args: qubit_a: first qubit of GateOperation qubit_b: second qubit of GateOperation op: operation to decompose tabulation: A tabulation for the Sycamore gate to use for decomposing gates. 
Returns: New operations iterable object """""" gate = op.gate if isinstance(gate, cirq.PhasedISwapPowGate): if math.isclose(gate.exponent, 1): return decompose_phased_iswap_into_syc(gate.phase_exponent, qubit_a, qubit_b) elif math.isclose(gate.phase_exponent, 0.25): return decompose_phased_iswap_into_syc_precomputed( gate.exponent * np.pi / 2, qubit_a, qubit_b ) else: raise ValueError( ""To decompose PhasedISwapPowGate, it must have a phase_exponent"" "" of .25 OR an exponent of 1.0, but got: {!r}"".format(op) ) if isinstance(gate, cirq.CNotPowGate): return [ cirq.Y(qubit_b) ** -0.5, cphase(gate.exponent * np.pi, qubit_a, qubit_b), cirq.Y(qubit_b) ** 0.5, ] elif isinstance(gate, cirq.CZPowGate): if math.isclose(gate.exponent, 1): # check if CZ or CPHASE return decompose_cz_into_syc(qubit_a, qubit_b) else: # because CZPowGate == diag([1, 1, 1, e^{i pi phi}]) return cphase(gate.exponent * np.pi, qubit_a, qubit_b) elif isinstance(gate, cirq.SwapPowGate) and math.isclose(gate.exponent, 1): return decompose_swap_into_syc(qubit_a, qubit_b) elif isinstance(gate, cirq.ISwapPowGate) and math.isclose(gate.exponent, 1): return decompose_iswap_into_syc(qubit_a, qubit_b) elif isinstance(gate, cirq.ZZPowGate): return rzz(gate.exponent * np.pi / 2, *op.qubits) elif cirq.unitary(gate, None) is not None: if tabulation: return decompose_arbitrary_into_syc_tabulation(qubit_a, qubit_b, op, tabulation) else: return decompose_arbitrary_into_syc_analytic(qubit_a, qubit_b, op) else: raise ValueError(f""Unrecognized gate: {op!r}"") " 3454,"def filter_to_condition(search_filter: SearchFilter, query_config: QueryConfig) -> Condition: """"""Coerce SearchFilter syntax to snuba Condtion syntax."""""" # Validate field exists and is filterable. field_alias = search_filter.key.name field = query_config.get(field_alias) if field is None: raise ParseError(f""Invalid field specified: {field_alias}."") elif not field.is_filterable: raise ParseError(f'""{field_alias}"" is not filterable.') # Validate strategy is correct. query_operator = search_filter.operator operator, errors = field.deserialize_operator(query_operator) if errors: raise ParseError(f""Invalid operator specified: {field_alias}."") # Deserialize value to its correct type or error. query_value = search_filter.value.value value, errors = field.deserialize_value(query_value) if errors: raise ParseError(f""Invalid value specified: {field_alias}."") return Condition(Column(field.query_alias or field.attribute_name), operator, value) ","def filter_to_condition(search_filter: SearchFilter, query_config: QueryConfig) -> Condition: """"""Coerce SearchFilter syntax to snuba Condition syntax."""""" # Validate field exists and is filterable. field_alias = search_filter.key.name field = query_config.get(field_alias) if field is None: raise ParseError(f""Invalid field specified: {field_alias}."") elif not field.is_filterable: raise ParseError(f'""{field_alias}"" is not filterable.') # Validate strategy is correct. query_operator = search_filter.operator operator, errors = field.deserialize_operator(query_operator) if errors: raise ParseError(f""Invalid operator specified: {field_alias}."") # Deserialize value to its correct type or error. 
query_value = search_filter.value.value value, errors = field.deserialize_value(query_value) if errors: raise ParseError(f""Invalid value specified: {field_alias}."") return Condition(Column(field.query_alias or field.attribute_name), operator, value) " 7143,"def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0, overlap=.5, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional the minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. exclude_border : int or bool, optional If nonzero int, `exclude_border` excludes blobs from within `exclude_border`-pixels of the border of the image. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach Examples -------- >>> from skimage import data, feature >>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40) array([[ 267. , 359. , 16.777216], [ 267. , 115. , 10.48576 ], [ 263. , 302. , 16.777216], [ 263. , 245. , 16.777216], [ 261. , 173. , 16.777216], [ 260. , 46. , 16.777216], [ 198. , 155. , 10.48576 ], [ 196. , 43. , 10.48576 ], [ 195. , 102. , 16.777216], [ 194. , 277. , 16.777216], [ 193. , 213. , 16.777216], [ 185. , 347. , 16.777216], [ 128. , 154. , 10.48576 ], [ 127. , 102. , 10.48576 ], [ 125. , 208. , 10.48576 ], [ 125. , 45. , 16.777216], [ 124. , 337. , 10.48576 ], [ 120. , 272. , 16.777216], [ 58. , 100. , 10.48576 ], [ 54. , 276. , 10.48576 ], [ 54. , 42. , 16.777216], [ 52. , 216. , 16.777216], [ 52. , 155. , 16.777216], [ 45. , 336. , 16.777216]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. 
"""""" image = img_as_float(image) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. This broadcasts scalar kernels if isinstance(max_sigma, (int, float)): max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float) if isinstance(min_sigma, (int, float)): min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=np.float) max_sigma = np.asarray(max_sigma, dtype=np.float) # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # computing difference between two successive Gaussian blurred images # multiplying with average standard deviation provides scale invariance dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * np.mean(sigma_list[i]) for i in range(k)] image_cube = np.stack(dog_images, axis=-1) # local_maxima = get_local_maxima(image_cube, threshold) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] # if the gaussian is isotropic, the stdev across dimensions are # identical, so return only the stdev deviation of the first dimension if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,): sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) return _prune_blobs(lm, overlap) ","def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0, overlap=.5, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional the minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. 
If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. exclude_border : int or bool, optional If nonzero int, `exclude_border` excludes blobs from within `exclude_border`-pixels of the border of the image. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach Examples -------- >>> from skimage import data, feature >>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40) array([[ 267. , 359. , 16.777216], [ 267. , 115. , 10.48576 ], [ 263. , 302. , 16.777216], [ 263. , 245. , 16.777216], [ 261. , 173. , 16.777216], [ 260. , 46. , 16.777216], [ 198. , 155. , 10.48576 ], [ 196. , 43. , 10.48576 ], [ 195. , 102. , 16.777216], [ 194. , 277. , 16.777216], [ 193. , 213. , 16.777216], [ 185. , 347. , 16.777216], [ 128. , 154. , 10.48576 ], [ 127. , 102. , 10.48576 ], [ 125. , 208. , 10.48576 ], [ 125. , 45. , 16.777216], [ 124. , 337. , 10.48576 ], [ 120. , 272. , 16.777216], [ 58. , 100. , 10.48576 ], [ 54. , 276. , 10.48576 ], [ 54. , 42. , 16.777216], [ 52. , 216. , 16.777216], [ 52. , 155. , 16.777216], [ 45. , 336. , 16.777216]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """""" image = img_as_float(image) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. 
This broadcasts scalar kernels if isinstance(max_sigma, (int, float)): max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float) if isinstance(min_sigma, (int, float)): min_sigma = np.full(image.ndim, min_sigma, dtype=float) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=np.float) max_sigma = np.asarray(max_sigma, dtype=np.float) # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # computing difference between two successive Gaussian blurred images # multiplying with average standard deviation provides scale invariance dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * np.mean(sigma_list[i]) for i in range(k)] image_cube = np.stack(dog_images, axis=-1) # local_maxima = get_local_maxima(image_cube, threshold) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] # if the gaussian is isotropic, the stdev across dimensions are # identical, so return only the stdev deviation of the first dimension if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,): sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) return _prune_blobs(lm, overlap) " 34557,"def validate_only_one_tokenizer_is_used(pipeline: List[""Component""]) -> None: """"""Validates that only one tokenizer is present in the pipeline. Args: pipeline: the list of the :class:`rasa.nlu.components.Component`. """""" from rasa.nlu.tokenizers.tokenizer import Tokenizer tokenizer_names = [] for component in pipeline: if isinstance(component, Tokenizer): tokenizer_names.append(component.name) if len(tokenizer_names) > 1: raise InvalidConfigError( f""The pipeline configuration contains more than one tokenizer, "" f""which is not possible at this time. You can only use one tokenizer. "" f""The pipeline contained the following tokenizers: {tokenizer_names}. "" ) ","def validate_only_one_tokenizer_is_used(pipeline: List[""Component""]) -> None: """"""Validates that only one tokenizer is present in the pipeline. Args: pipeline: the list of the :class:`rasa.nlu.components.Component`. """""" from rasa.nlu.tokenizers.tokenizer import Tokenizer tokenizer_names = [] for component in pipeline: if isinstance(component, Tokenizer): tokenizer_names.append(component.name) if len(tokenizer_names) > 1: raise InvalidConfigError( f""The pipeline configuration contains more than one tokenizer, "" f""which is not possible at this time. You can only use one tokenizer. "" f""The pipeline contains the following tokenizers: {', '.join(tokenizer_names)}. 
"" ) " 4472,"def _voroni_topomap(data, pos, info, sphere, ch_type, outlines, ax, cmap, norm): """"""Make a Voroni diagram on a topomap."""""" from scipy.spatial import Voronoi sphere = _check_sphere(sphere) clip_origin = _adjust_meg_sphere(sphere, info, ch_type)[1] outlines = _make_head_outlines( sphere, pos, outlines, clip_origin) rx, ry = outlines['clip_radius'] cx, cy = clip_origin # add faroff points in a circle vor = Voronoi(np.concatenate([pos, [(np.cos(2 * np.pi / 100 * t), np.sin(2 * np.pi / 100 * t)) for t in range(101)]])) for point_idx, region_idx in enumerate(vor.point_region[:-101]): if -1 in vor.regions[region_idx]: continue polygon = list() for i in vor.regions[region_idx]: x, y = vor.vertices[i] if (x - cx)**2 / rx**2 + (y - cy)**2 / ry**2 < 1: polygon.append((x, y)) else: x *= rx / np.linalg.norm(vor.vertices[i]) y *= ry / np.linalg.norm(vor.vertices[i]) polygon.append((x, y)) ax.fill(*zip(*polygon), color=cmap(norm(data[point_idx]))) ","def _voroni_topomap(data, pos, info, sphere, ch_type, outlines, ax, cmap, norm): """"""Make a Voronoi diagram on a topomap."""""" from scipy.spatial import Voronoi sphere = _check_sphere(sphere) clip_origin = _adjust_meg_sphere(sphere, info, ch_type)[1] outlines = _make_head_outlines( sphere, pos, outlines, clip_origin) rx, ry = outlines['clip_radius'] cx, cy = clip_origin # add faroff points in a circle vor = Voronoi(np.concatenate([pos, [(np.cos(2 * np.pi / 100 * t), np.sin(2 * np.pi / 100 * t)) for t in range(101)]])) for point_idx, region_idx in enumerate(vor.point_region[:-101]): if -1 in vor.regions[region_idx]: continue polygon = list() for i in vor.regions[region_idx]: x, y = vor.vertices[i] if (x - cx)**2 / rx**2 + (y - cy)**2 / ry**2 < 1: polygon.append((x, y)) else: x *= rx / np.linalg.norm(vor.vertices[i]) y *= ry / np.linalg.norm(vor.vertices[i]) polygon.append((x, y)) ax.fill(*zip(*polygon), color=cmap(norm(data[point_idx]))) " 39566,"def main(): """""" Synchronizes a github repository with a local repository. """""" logging.basicConfig( format='[%(asctime)s] %(levelname)s -- %(message)s', level=logging.DEBUG) parser = argparse.ArgumentParser(description='Synchronizes a github repository with a local repository.') parser.add_argument('git_url', help='Url of the repo to sync') parser.add_argument('--branch_name', default=None, required=False, help='Branch of repo to sync', nargs='?') parser.add_argument('--repo_dir', default='.', required=False, help='Path to clone repo under', nargs='?') args = parser.parse_args() for line in GitPuller( args.git_url, args.repo_dir, branch=args.branch_name ).pull(): print(line) ","def main(): """""" Synchronizes a github repository with a local repository. """""" logging.basicConfig( format='[%(asctime)s] %(levelname)s -- %(message)s', level=logging.DEBUG) parser = argparse.ArgumentParser(description='Synchronizes a github repository with a local repository.') parser.add_argument('git_url', help='Url of the repo to sync') parser.add_argument('--branch-name', default=None, required=False, help='Branch of repo to sync') parser.add_argument('--repo_dir', default='.', required=False, help='Path to clone repo under', nargs='?') args = parser.parse_args() for line in GitPuller( args.git_url, args.repo_dir, branch=args.branch_name ).pull(): print(line) " 4684,"def date2num(d): """""" Convert datetime objects to Matplotlib dates. 
Parameters ---------- d : `datetime.datetime` or `numpy.datetime64` or sequences of these Returns ------- float or sequence of floats Number of days (fraction part represents hours, minutes, seconds, ms) since 0001-01-01 00:00:00 UTC, plus one. Notes ----- The addition of one here is a historical artifact. Also, note that the Gregorian calendar is assumed; this is not universal practice. For details see the module docstring. """""" if hasattr(d, ""values""): # this unpacks pandas series or dataframes... d = d.values if not np.iterable(d) and not isinstance(d, np.ndarray): # single value logic... if (isinstance(d, np.datetime64) or isinstance(d, np.timedelta64)): return _dt64_to_ordinalf(d) else: return _to_ordinalf(d) elif (isinstance(d, np.ndarray) and (np.issubdtype(d.dtype, np.datetime64) or np.issubdtype(d.dtype, np.timedelta64))): # array with all one type of datetime64 object. return _dt64_to_ordinalf(d) elif len(d): # this is a list or tuple... if (isinstance(d[0], np.datetime64) or isinstance(d[0], np.timedelta64)): return _dt64_to_ordinalf_iterable(d) return _to_ordinalf_np_vectorized(d) elif hasattr(d, 'size') and not d.size: # this is an empty return d else: return [] ","def date2num(d): """""" Convert datetime objects to Matplotlib dates. Parameters ---------- d : `datetime.datetime` or `numpy.datetime64` or sequences of these Returns ------- float or sequence of floats Number of days (fraction part represents hours, minutes, seconds, ms) since 0001-01-01 00:00:00 UTC, plus one. Notes ----- The addition of one here is a historical artifact. Also, note that the Gregorian calendar is assumed; this is not universal practice. For details see the module docstring. """""" if hasattr(d, ""values""): # this unpacks pandas series or dataframes... d = d.values if not np.iterable(d) and not isinstance(d, np.ndarray): # single value logic... if (isinstance(d, np.datetime64) or isinstance(d, np.timedelta64)): return _dt64_to_ordinalf(d) else: return _to_ordinalf(d) elif (isinstance(d, np.ndarray) and (np.issubdtype(d.dtype, np.datetime64) or np.issubdtype(d.dtype, np.timedelta64))): # array with all one type of datetime64 object. return _dt64_to_ordinalf(d) elif len(d): # this is a list or tuple... if (isinstance(d[0], np.datetime64) or isinstance(d[0], np.timedelta64)): return _dt64_to_ordinalf_iterable(d) return _to_ordinalf_np_vectorized(d) elif hasattr(d, 'size') and not d.size: # this is an empty return d else: return [] " 8247,"def check_cgi_connection(url): """""" At the moment there is no way to make an ""are you alive"" request to the cgi, so we just hit it with a HTTP get and it gives us back a 411 response. This is weird enough that it probably satisfies us for this check. """""" try: return urlopen(url).getcode() == 411 except HTTPError as e: if e.code == 411: return True warn_user(f""Connection to {url} failed with error {e}. Retrying with different url and port."") return None except (socket.error, socket.timeout, HTTPError, URLError) as e: warn_user(f""Connection to {url} failed with error {e}. Retrying with different url and port."") return None ","def check_cgi_connection(url): """""" At the moment there is no way to make an ""are you alive"" request to the cgi, so we just hit it with a HTTP get and it gives us back a 411 response. This is weird enough that it probably satisfies us for this check. """""" try: return urlopen(url).getcode() == 411 except HTTPError as e: if e.code == 411: return True warn_user(f""Connection to {url} failed with error {e}. 
Retrying with different url and port."") return None except (socket.error, socket.timeout, URLError) as e: warn_user(f""Connection to {url} failed with error {e}. Retrying with different url and port."") return None " 20008,"def analyze_color(rgb_img, mask, hist_plot_type=None): """"""Analyze the color properties of an image object Inputs: rgb_img = RGB image data mask = Binary mask made from selected contours hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv' Returns: analysis_image = histogram output :param rgb_img: numpy.ndarray :param mask: numpy.ndarray :param hist_plot_type: str :return analysis_images: list """""" params.device += 1 if len(np.shape(rgb_img)) < 3: fatal_error(""rgb_img must be an RGB image"") # Mask the input image masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask) # Extract the blue, green, and red channels b, g, r = cv2.split(masked) # Convert the BGR image to LAB lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB) # Extract the lightness, green-magenta, and blue-yellow channels l, m, y = cv2.split(lab) # Convert the BGR image to HSV hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV) # Extract the hue, saturation, and value channels h, s, v = cv2.split(hsv) # Color channel dictionary channels = {""b"": b, ""g"": g, ""r"": r, ""l"": l, ""m"": m, ""y"": y, ""h"": h, ""s"": s, ""v"": v} # Histogram plot types hist_types = {""ALL"": (""b"", ""g"", ""r"", ""l"", ""m"", ""y"", ""h"", ""s"", ""v""), ""RGB"": (""b"", ""g"", ""r""), ""LAB"": (""l"", ""m"", ""y""), ""HSV"": (""h"", ""s"", ""v"")} if hist_plot_type is not None and hist_plot_type.upper() not in hist_types: fatal_error(""The histogram plot type was "" + str(hist_plot_type) + ', but can only be one of the following: None, ""all"", ""rgb"", ""lab"", or ""hsv""!') # Store histograms, plotting colors, and plotting labels histograms = { ""b"": {""label"": ""blue"", ""graph_color"": ""blue"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""b""]], [0], mask, [256], [0, 255])]}, ""g"": {""label"": ""green"", ""graph_color"": ""forestgreen"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""g""]], [0], mask, [256], [0, 255])]}, ""r"": {""label"": ""red"", ""graph_color"": ""red"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""r""]], [0], mask, [256], [0, 255])]}, ""l"": {""label"": ""lightness"", ""graph_color"": ""dimgray"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""l""]], [0], mask, [256], [0, 255])]}, ""m"": {""label"": ""green-magenta"", ""graph_color"": ""magenta"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""m""]], [0], mask, [256], [0, 255])]}, ""y"": {""label"": ""blue-yellow"", ""graph_color"": ""yellow"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""y""]], [0], mask, [256], [0, 255])]}, ""h"": {""label"": ""hue"", ""graph_color"": ""blueviolet"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""h""]], [0], mask, [256], [0, 255])]}, ""s"": {""label"": ""saturation"", ""graph_color"": ""cyan"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""s""]], [0], mask, [256], [0, 255])]}, ""v"": {""label"": ""value"", ""graph_color"": ""orange"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""v""]], [0], mask, [256], [0, 255])]} } # Create list of bin labels for 8-bit data binval = np.arange(0, 256) bin_values = [l for l in binval] analysis_image = None # Create a dataframe of bin labels and histogram data dataset = pd.DataFrame({'bins': binval, 'blue': histograms[""b""][""hist""], 'green': histograms[""g""][""hist""], 'red': 
histograms[""r""][""hist""], 'lightness': histograms[""l""][""hist""], 'green-magenta': histograms[""m""][""hist""], 'blue-yellow': histograms[""y""][""hist""], 'hue': histograms[""h""][""hist""], 'saturation': histograms[""s""][""hist""], 'value': histograms[""v""][""hist""]}) # Make the histogram figure using plotnine if hist_plot_type is not None: if hist_plot_type.upper() == 'RGB': df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'], var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(['blue', 'green', 'red']) ) elif hist_plot_type.upper() == 'LAB': df_lab = pd.melt(dataset, id_vars=['bins'], value_vars=['lightness', 'green-magenta', 'blue-yellow'], var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(['yellow', 'magenta', 'dimgray']) ) elif hist_plot_type.upper() == 'HSV': df_hsv = pd.melt(dataset, id_vars=['bins'], value_vars=['hue', 'saturation', 'value'], var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(['blueviolet', 'cyan', 'orange']) ) elif hist_plot_type.upper() == 'ALL': s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta', 'blue-yellow', 'hue', 'saturation', 'value'], dtype=""category"") color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet', 'dimgray', 'red', 'cyan', 'orange'] df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(color_channels) ) analysis_image = hist_fig # Hue values of zero are red but are also the value for pixels where hue is undefined # The hue value of a pixel will be undefined when the color values are saturated # Therefore, hue values of zero are excluded from the calculations below # Calculate the median hue value # The median is rescaled from the encoded 0-179 range to the 0-359 degree range hue_median = np.median(h[np.where(h > 0)]) * 2 # Calculate the circular mean and standard deviation of the encoded hue values # The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2 hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2 # Store into lists instead for pipeline and print_results # stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median} # Plot or print the histogram if hist_plot_type is not None: if params.debug == 'print': hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png')) elif params.debug == 'plot': print(hist_fig) # Store into global measurements # RGB signal values are in an unsigned 8-bit scale of 0-255 rgb_values = [i for i in range(0, 256)] # Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval hue_values = [i * 2 + 1 for i in range(0, 180)] # Percentage values on a 0-100 scale (lightness, saturation, and value) percent_values = [round((i / 255) * 100, 2) for 
i in range(0, 256)] # Diverging values on a -128 to 127 scale (green-magenta and blue-yellow) diverging_values = [i for i in range(-128, 128)] if hist_plot_type is not None: if hist_plot_type.upper() == 'RGB': outputs.add_observation(variable='blue_frequencies', trait='blue frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""b""][""hist""], label=rgb_values) outputs.add_observation(variable='green_frequencies', trait='green frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""g""][""hist""], label=rgb_values) outputs.add_observation(variable='red_frequencies', trait='red frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""r""][""hist""], label=rgb_values) elif hist_plot_type.upper() == 'LAB': outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""l""][""hist""], label=percent_values) outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""m""][""hist""], label=diverging_values) outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""y""][""hist""], label=diverging_values) elif hist_plot_type.upper() == 'HSV': outputs.add_observation(variable='hue_frequencies', trait='hue frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""h""][""hist""][0:180], label=hue_values) outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""s""][""hist""], label=percent_values) outputs.add_observation(variable='value_frequencies', trait='value frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""v""][""hist""], label=percent_values) elif hist_plot_type.upper() == 'ALL': outputs.add_observation(variable='blue_frequencies', trait='blue frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""b""][""hist""], label=rgb_values) outputs.add_observation(variable='green_frequencies', trait='green frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""g""][""hist""], label=rgb_values) outputs.add_observation(variable='red_frequencies', trait='red frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""r""][""hist""], label=rgb_values) outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""l""][""hist""], label=percent_values) outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""m""][""hist""], label=diverging_values) outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""y""][""hist""], 
label=diverging_values) outputs.add_observation(variable='hue_frequencies', trait='hue frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""h""][""hist""][0:180], label=hue_values) outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""s""][""hist""], label=percent_values) outputs.add_observation(variable='value_frequencies', trait='value frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""v""][""hist""], label=percent_values) # Always save hue stats outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean', method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float, value=hue_circular_mean, label='degrees') outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation', method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float, value=hue_median, label='degrees') outputs.add_observation(variable='hue_median', trait='hue median', method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float, value=hue_median, label='degrees') # Store images outputs.images.append([analysis_image]) return analysis_image ","def analyze_color(rgb_img, mask, hist_plot_type=None): """"""Analyze the color properties of an image object Inputs: rgb_img = RGB image data mask = Binary mask made from selected contours hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv' Returns: analysis_image = histogram output :param rgb_img: numpy.ndarray :param mask: numpy.ndarray :param hist_plot_type: str :return analysis_images: list """""" params.device += 1 if len(np.shape(rgb_img)) < 3: fatal_error(""rgb_img must be an RGB image"") # Mask the input image masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask) # Extract the blue, green, and red channels b, g, r = cv2.split(masked) # Convert the BGR image to LAB lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB) # Extract the lightness, green-magenta, and blue-yellow channels l, m, y = cv2.split(lab) # Convert the BGR image to HSV hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV) # Extract the hue, saturation, and value channels h, s, v = cv2.split(hsv) # Color channel dictionary channels = {""b"": b, ""g"": g, ""r"": r, ""l"": l, ""m"": m, ""y"": y, ""h"": h, ""s"": s, ""v"": v} # Histogram plot types hist_types = {""ALL"": (""b"", ""g"", ""r"", ""l"", ""m"", ""y"", ""h"", ""s"", ""v""), ""RGB"": (""b"", ""g"", ""r""), ""LAB"": (""l"", ""m"", ""y""), ""HSV"": (""h"", ""s"", ""v"")} if hist_plot_type is not None and hist_plot_type.upper() not in hist_types: fatal_error(""The histogram plot type was "" + str(hist_plot_type) + ', but can only be one of the following: None, ""all"", ""rgb"", ""lab"", or ""hsv""!') # Store histograms, plotting colors, and plotting labels histograms = { ""b"": {""label"": ""blue"", ""graph_color"": ""blue"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""b""]], [0], mask, [256], [0, 255])]}, ""g"": {""label"": ""green"", ""graph_color"": ""forestgreen"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""g""]], [0], mask, [256], [0, 255])]}, ""r"": {""label"": ""red"", ""graph_color"": ""red"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""r""]], [0], mask, [256], [0, 255])]}, ""l"": {""label"": ""lightness"", ""graph_color"": ""dimgray"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""l""]], [0], mask, 
[256], [0, 255])]}, ""m"": {""label"": ""green-magenta"", ""graph_color"": ""magenta"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""m""]], [0], mask, [256], [0, 255])]}, ""y"": {""label"": ""blue-yellow"", ""graph_color"": ""yellow"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""y""]], [0], mask, [256], [0, 255])]}, ""h"": {""label"": ""hue"", ""graph_color"": ""blueviolet"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""h""]], [0], mask, [256], [0, 255])]}, ""s"": {""label"": ""saturation"", ""graph_color"": ""cyan"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""s""]], [0], mask, [256], [0, 255])]}, ""v"": {""label"": ""value"", ""graph_color"": ""orange"", ""hist"": [float(l[0]) for l in cv2.calcHist([channels[""v""]], [0], mask, [256], [0, 255])]} } # Create list of bin labels for 8-bit data binval = np.arange(0, 256) bin_values = [l for l in binval] analysis_image = None # Create a dataframe of bin labels and histogram data dataset = pd.DataFrame({'bins': binval, 'blue': histograms[""b""][""hist""], 'green': histograms[""g""][""hist""], 'red': histograms[""r""][""hist""], 'lightness': histograms[""l""][""hist""], 'green-magenta': histograms[""m""][""hist""], 'blue-yellow': histograms[""y""][""hist""], 'hue': histograms[""h""][""hist""], 'saturation': histograms[""s""][""hist""], 'value': histograms[""v""][""hist""]}) # Make the histogram figure using plotnine if hist_plot_type is not None: if hist_plot_type.upper() == 'RGB': df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'], var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(['blue', 'green', 'red']) ) elif hist_plot_type.upper() == 'LAB': df_lab = pd.melt(dataset, id_vars=['bins'], value_vars=['lightness', 'green-magenta', 'blue-yellow'], var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(['yellow', 'magenta', 'dimgray']) ) elif hist_plot_type.upper() == 'HSV': df_hsv = pd.melt(dataset, id_vars=['bins'], value_vars=['hue', 'saturation', 'value'], var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(['blueviolet', 'cyan', 'orange']) ) elif hist_plot_type.upper() == 'ALL': s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta', 'blue-yellow', 'hue', 'saturation', 'value'], dtype=""category"") color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet', 'dimgray', 'red', 'cyan', 'orange'] df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel', value_name='Pixels') hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel')) + geom_line() + scale_x_continuous(breaks=list(range(0, 256, 25))) + scale_color_manual(color_channels) ) analysis_image = hist_fig # Hue values of zero are red but are also the value for pixels where hue is undefined # The hue value of a pixel will be undefined when the color values are saturated # Therefore, hue values of zero are excluded from the calculations below # Calculate the median hue value # The median is rescaled from the encoded 0-179 range to the 0-359 degree range hue_median = 
np.median(h[np.where(h > 0)]) * 2 # Calculate the circular mean and standard deviation of the encoded hue values # The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2 hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2 # Store into lists instead for pipeline and print_results # stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median} # Plot or print the histogram if hist_plot_type is not None: if params.debug == 'print': hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png')) elif params.debug == 'plot': print(hist_fig) # Store into global measurements # RGB signal values are in an unsigned 8-bit scale of 0-255 rgb_values = [i for i in range(0, 256)] # Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval hue_values = [i * 2 + 1 for i in range(0, 180)] # Percentage values on a 0-100 scale (lightness, saturation, and value) percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)] # Diverging values on a -128 to 127 scale (green-magenta and blue-yellow) diverging_values = [i for i in range(-128, 128)] if hist_plot_type is not None: if hist_plot_type.upper() == 'RGB': outputs.add_observation(variable='blue_frequencies', trait='blue frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""b""][""hist""], label=rgb_values) outputs.add_observation(variable='green_frequencies', trait='green frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""g""][""hist""], label=rgb_values) outputs.add_observation(variable='red_frequencies', trait='red frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""r""][""hist""], label=rgb_values) elif hist_plot_type.upper() == 'LAB': outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""l""][""hist""], label=percent_values) outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""m""][""hist""], label=diverging_values) outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""y""][""hist""], label=diverging_values) elif hist_plot_type.upper() == 'HSV' or hist_plot_type.upper() == 'ALL': outputs.add_observation(variable='hue_frequencies', trait='hue frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""h""][""hist""][0:180], label=hue_values) outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""s""][""hist""], label=percent_values) outputs.add_observation(variable='value_frequencies', trait='value frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""v""][""hist""], label=percent_values) elif hist_plot_type.upper() == 'ALL': outputs.add_observation(variable='blue_frequencies', trait='blue frequencies', method='plantcv.plantcv.analyze_color', 
scale='frequency', datatype=list, value=histograms[""b""][""hist""], label=rgb_values) outputs.add_observation(variable='green_frequencies', trait='green frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""g""][""hist""], label=rgb_values) outputs.add_observation(variable='red_frequencies', trait='red frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""r""][""hist""], label=rgb_values) outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""l""][""hist""], label=percent_values) outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""m""][""hist""], label=diverging_values) outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""y""][""hist""], label=diverging_values) outputs.add_observation(variable='hue_frequencies', trait='hue frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""h""][""hist""][0:180], label=hue_values) outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""s""][""hist""], label=percent_values) outputs.add_observation(variable='value_frequencies', trait='value frequencies', method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list, value=histograms[""v""][""hist""], label=percent_values) # Always save hue stats outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean', method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float, value=hue_circular_mean, label='degrees') outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation', method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float, value=hue_median, label='degrees') outputs.add_observation(variable='hue_median', trait='hue median', method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float, value=hue_median, label='degrees') # Store images outputs.images.append([analysis_image]) return analysis_image " 7559,"def paranormal(center, *, std=None, var=None, ivar=None, n_samples, **kwargs): """"""Create a paranormal distribution. Not unsimilar to https://jech.bmj.com/content/60/1/6 . Parameters ---------- center : `~astropy.units.Quantity` The center of this distribution std : `~astropy.units.Quantity` or `None` The standard deviation/σ of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``var`` or ``ivar`` are set). var : `~astropy.units.Quantity` or `None` The variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set). ivar : `~astropy.units.Quantity` or `None` The inverse variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``var`` are set). n_samples : int The number of Monte Carlo samples to use with this distribution Remaining keywords are passed into the constructor of the `Distribution` subclass. 
Returns ------- distr : `Distribution` The sampled paranormal distribution with a ``plot`` method. Examples -------- A simple paranormal plot. .. plot:: :include-source: import numpy as np from astropy import uncertainty as unc np.random.seed(12345) # ensures reproducible example numbers a = unc.paranormal(1, std=30, n_samples=10000) a.plot() """""" center = np.asanyarray(center) # Only one allowed, as per original figure. # Somehow Quantity creates a mysterious QuantityParanormalDistribution # that I cannot control. if center.size != 1: raise ValueError('oooOOOooOOoooo') if var is not None: if std is None: std = np.asanyarray(var)**0.5 else: raise ValueError('normal cannot take both std and var') if ivar is not None: if std is None: std = np.asanyarray(ivar)**-0.5 else: raise ValueError('normal cannot take both ivar and ' 'and std or var') if std is None: raise ValueError('normal requires one of std, var, or ivar') else: std = np.asanyarray(std) randshape = np.broadcast(std, center).shape + (n_samples,) samples = center[..., np.newaxis] + np.random.randn(*randshape) * std[..., np.newaxis] # The following is a bit convoluted to get around the __new__ magic. obj = Distribution(samples, **kwargs) def plot(self): """"""Plot paranormal distribution."""""" import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from astropy.uncertainty.distributions import normal from astropy.visualization import quantity_support n_bins = 50 x = self.pdf_mean() dx = self.pdf_std() dx2 = dx * 0.3 new_std = dx * 0.3 new_n = int(self.n_samples * 0.2) left = normal(x - dx, std=new_std, n_samples=new_n) right = normal(x + dx, std=new_std, n_samples=new_n) hist_kwargs = {'bins': n_bins, 'histtype': 'step', 'fill': False, 'ec': 'k'} with quantity_support(): h = plt.hist(self.distribution, **hist_kwargs) plt.hist(left.distribution, **hist_kwargs) plt.hist(right.distribution, **hist_kwargs) y_max = h[0].max() y_eye = 0.7 * y_max w_eye = 0.15 * dx h_eye = 0.1 * y_max ax = plt.gca() ax.add_patch(Ellipse(xy=(x - dx2, y_eye), width=w_eye, height=h_eye, edgecolor='k', fc='k')) ax.add_patch(Ellipse(xy=(x + dx2, y_eye), width=w_eye, height=h_eye, edgecolor='k', fc='k')) plt.show() setattr(obj.__class__, 'plot', plot) return obj ","def paranormal(center, *, std=None, var=None, ivar=None, n_samples, **kwargs): """"""Create a paranormal distribution. Not unsimilar to https://jech.bmj.com/content/60/1/6 . Parameters ---------- center : `~astropy.units.Quantity` The center of this distribution std : `~astropy.units.Quantity` or `None` The standard deviation/σ of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``var`` or ``ivar`` are set). var : `~astropy.units.Quantity` or `None` The variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set). ivar : `~astropy.units.Quantity` or `None` The inverse variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``var`` are set). n_samples : int The number of Monte Carlo samples to use with this distribution Remaining keywords are passed into the constructor of the `Distribution` subclass. Returns ------- distr : `Distribution` The sampled paranormal distribution with a ``plot`` method. Examples -------- A simple paranormal plot. .. 
plot:: :include-source: import numpy as np from astropy import uncertainty as unc np.random.seed(12345) # ensures reproducible example numbers a = unc.paranormal(1, std=30, n_samples=10000) a.plot() """""" center = np.asanyarray(center) # Only one allowed, as per original figure. # Somehow Quantity creates a mysterious QuantityParanormalDistribution # that I cannot control. if center.size != 1: raise ValueError('👻') if var is not None: if std is None: std = np.asanyarray(var)**0.5 else: raise ValueError('normal cannot take both std and var') if ivar is not None: if std is None: std = np.asanyarray(ivar)**-0.5 else: raise ValueError('normal cannot take both ivar and ' 'and std or var') if std is None: raise ValueError('normal requires one of std, var, or ivar') else: std = np.asanyarray(std) randshape = np.broadcast(std, center).shape + (n_samples,) samples = center[..., np.newaxis] + np.random.randn(*randshape) * std[..., np.newaxis] # The following is a bit convoluted to get around the __new__ magic. obj = Distribution(samples, **kwargs) def plot(self): """"""Plot paranormal distribution."""""" import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from astropy.uncertainty.distributions import normal from astropy.visualization import quantity_support n_bins = 50 x = self.pdf_mean() dx = self.pdf_std() dx2 = dx * 0.3 new_std = dx * 0.3 new_n = int(self.n_samples * 0.2) left = normal(x - dx, std=new_std, n_samples=new_n) right = normal(x + dx, std=new_std, n_samples=new_n) hist_kwargs = {'bins': n_bins, 'histtype': 'step', 'fill': False, 'ec': 'k'} with quantity_support(): h = plt.hist(self.distribution, **hist_kwargs) plt.hist(left.distribution, **hist_kwargs) plt.hist(right.distribution, **hist_kwargs) y_max = h[0].max() y_eye = 0.7 * y_max w_eye = 0.15 * dx h_eye = 0.1 * y_max ax = plt.gca() ax.add_patch(Ellipse(xy=(x - dx2, y_eye), width=w_eye, height=h_eye, edgecolor='k', fc='k')) ax.add_patch(Ellipse(xy=(x + dx2, y_eye), width=w_eye, height=h_eye, edgecolor='k', fc='k')) plt.show() setattr(obj.__class__, 'plot', plot) return obj " 21301,"def start_active_span_from_edu( edu_content, operation_name, references=[], tags=None, start_time=None, ignore_active_span=False, finish_on_close=True, ): """""" Extracts a span context from an edu and uses it to start a new active span Args: edu_content (Dict): and edu_content with a `context` field whose value is canonical json for a dict which contains opentracing information. """""" if opentracing is None: return _noop_context_manager() carrier = json.loads(edu_content.get(""context"", ""{}"")).get(""opentracing"", {}) context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier) _references = [ opentracing.child_of(span_context_from_string(x)) for x in carrier.get(""references"", []) ] # For some reason jaeger decided not to support the visualization of multiple parent # spans or explicitely show references. I include the span context as a tag here as # an aid to people debugging but it's really not an ideal solution. 
references += _references scope = opentracing.tracer.start_active_span( operation_name, child_of=context, references=references, tags=tags, start_time=start_time, ignore_active_span=ignore_active_span, finish_on_close=finish_on_close, ) scope.span.set_tag(""references"", carrier.get(""references"", [])) return scope ","def start_active_span_from_edu( edu_content, operation_name, references=[], tags=None, start_time=None, ignore_active_span=False, finish_on_close=True, ): """""" Extracts a span context from an edu and uses it to start a new active span Args: edu_content (dict): and edu_content with a `context` field whose value is canonical json for a dict which contains opentracing information. """""" if opentracing is None: return _noop_context_manager() carrier = json.loads(edu_content.get(""context"", ""{}"")).get(""opentracing"", {}) context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier) _references = [ opentracing.child_of(span_context_from_string(x)) for x in carrier.get(""references"", []) ] # For some reason jaeger decided not to support the visualization of multiple parent # spans or explicitely show references. I include the span context as a tag here as # an aid to people debugging but it's really not an ideal solution. references += _references scope = opentracing.tracer.start_active_span( operation_name, child_of=context, references=references, tags=tags, start_time=start_time, ignore_active_span=ignore_active_span, finish_on_close=finish_on_close, ) scope.span.set_tag(""references"", carrier.get(""references"", [])) return scope " 4465,"def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None, extra_points=None, dig_ch_pos=None, coord_frame='head'): """"""Construct digitizer info for the info. Parameters ---------- nasion : array-like | numpy.ndarray, shape (3,) | None Point designated as the nasion point. lpa : array-like | numpy.ndarray, shape (3,) | None Point designated as the left auricular point. rpa : array-like | numpy.ndarray, shape (3,) | None Point designated as the right auricular point. hpi : array-like | numpy.ndarray, shape (n_points, 3) | None Points designated as head position indicator points. extra_points : array-like | numpy.ndarray, shape (n_points, 3) Points designed as the headshape points. dig_ch_pos : dict Dict of EEG channel positions. coord_frame : str The coordinate frame of the points. Usually this is ""unknown"" for native digitizer space. Defaults to ""head"". Returns ------- dig : list of dicts A container of DigPoints to be added to the info['dig']. 
"""""" coord_frame = _coord_frame_const(coord_frame) dig = [] if lpa is not None: lpa = np.asarray(lpa) if lpa.shape != (3,): raise ValueError('LPA should have the shape (3,) instead of %s' % (lpa.shape,)) dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA, 'kind': FIFF.FIFFV_POINT_CARDINAL, 'coord_frame': coord_frame}) if nasion is not None: nasion = np.asarray(nasion) if nasion.shape != (3,): raise ValueError('Nasion should have the shape (3,) instead of %s' % (nasion.shape,)) dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION, 'kind': FIFF.FIFFV_POINT_CARDINAL, 'coord_frame': coord_frame}) if rpa is not None: rpa = np.asarray(rpa) if rpa.shape != (3,): raise ValueError('RPA should have the shape (3,) instead of %s' % (rpa.shape,)) dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA, 'kind': FIFF.FIFFV_POINT_CARDINAL, 'coord_frame': coord_frame}) if hpi is not None: hpi = np.asarray(hpi) if hpi.ndim != 2 or hpi.shape[1] != 3: raise ValueError('HPI should have the shape (n_points, 3) instead ' 'of %s' % (hpi.shape,)) for idx, point in enumerate(hpi): dig.append({'r': point, 'ident': idx + 1, 'kind': FIFF.FIFFV_POINT_HPI, 'coord_frame': coord_frame}) if extra_points is not None: extra_points = np.asarray(extra_points) if len(extra_points) and extra_points.shape[1] != 3: raise ValueError('Points should have the shape (n_points, 3) ' 'instead of %s' % (extra_points.shape,)) for idx, point in enumerate(extra_points): dig.append({'r': point, 'ident': idx + 1, 'kind': FIFF.FIFFV_POINT_EXTRA, 'coord_frame': coord_frame}) if dig_ch_pos is not None: try: # use the last 3 as int if possible (e.g., EEG001->1) idents = [] for key, value in dig_ch_pos.items(): _validate_type(key, str, 'dig_ch_pos') _validate_type(value, (np.ndarray, list, tuple), 'dig_ch_pos') if isinstance(value, (list, tuple)): value = np.array(value) dig_ch_pos[key] = value if value.dtype == int or value.dtype == np.float32: value = value.astype(np.float64) dig_ch_pos[key] = value if value.shape != (3, ) or value.dtype != np.float64: raise RuntimeError(""The position should be a 1D array of "" ""floats [x, y, z]."") idents.append(int(key[-3:])) except ValueError: # and if any conversion fails, simply use arange idents = np.arange(1, len(dig_ch_pos) + 1) for key, ident in zip(dig_ch_pos, idents): dig.append({'r': dig_ch_pos[key], 'ident': int(ident), 'kind': FIFF.FIFFV_POINT_EEG, 'coord_frame': coord_frame}) return _format_dig_points(dig) ","def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None, extra_points=None, dig_ch_pos=None, coord_frame='head'): """"""Construct digitizer info for the info. Parameters ---------- nasion : array-like | numpy.ndarray, shape (3,) | None Point designated as the nasion point. lpa : array-like | numpy.ndarray, shape (3,) | None Point designated as the left auricular point. rpa : array-like | numpy.ndarray, shape (3,) | None Point designated as the right auricular point. hpi : array-like | numpy.ndarray, shape (n_points, 3) | None Points designated as head position indicator points. extra_points : array-like | numpy.ndarray, shape (n_points, 3) Points designed as the headshape points. dig_ch_pos : dict Dict of EEG channel positions. coord_frame : str The coordinate frame of the points. Usually this is ""unknown"" for native digitizer space. Defaults to ""head"". Returns ------- dig : list of dicts A container of DigPoints to be added to the info['dig']. 
"""""" coord_frame = _coord_frame_const(coord_frame) dig = [] if lpa is not None: lpa = np.asarray(lpa) if lpa.shape != (3,): raise ValueError('LPA should have the shape (3,) instead of %s' % (lpa.shape,)) dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA, 'kind': FIFF.FIFFV_POINT_CARDINAL, 'coord_frame': coord_frame}) if nasion is not None: nasion = np.asarray(nasion) if nasion.shape != (3,): raise ValueError('Nasion should have the shape (3,) instead of %s' % (nasion.shape,)) dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION, 'kind': FIFF.FIFFV_POINT_CARDINAL, 'coord_frame': coord_frame}) if rpa is not None: rpa = np.asarray(rpa) if rpa.shape != (3,): raise ValueError('RPA should have the shape (3,) instead of %s' % (rpa.shape,)) dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA, 'kind': FIFF.FIFFV_POINT_CARDINAL, 'coord_frame': coord_frame}) if hpi is not None: hpi = np.asarray(hpi) if hpi.ndim != 2 or hpi.shape[1] != 3: raise ValueError('HPI should have the shape (n_points, 3) instead ' 'of %s' % (hpi.shape,)) for idx, point in enumerate(hpi): dig.append({'r': point, 'ident': idx + 1, 'kind': FIFF.FIFFV_POINT_HPI, 'coord_frame': coord_frame}) if extra_points is not None: extra_points = np.asarray(extra_points) if len(extra_points) and extra_points.shape[1] != 3: raise ValueError('Points should have the shape (n_points, 3) ' 'instead of %s' % (extra_points.shape,)) for idx, point in enumerate(extra_points): dig.append({'r': point, 'ident': idx + 1, 'kind': FIFF.FIFFV_POINT_EXTRA, 'coord_frame': coord_frame}) if dig_ch_pos is not None: try: # use the last 3 as int if possible (e.g., EEG001->1) idents = [] for key, value in dig_ch_pos.items(): _validate_type(key, str, 'dig_ch_pos') _validate_type(value, (np.ndarray, list, tuple), 'dig_ch_pos') if isinstance(value, (list, tuple)): value = np.array(value) dig_ch_pos[key] = value if value.dtype == int or value.dtype == np.float32: value = value.astype(np.float64) dig_ch_pos[key] = value if value.shape != (3, ) or value.dtype != np.float64: raise RuntimeError(""The position should be a 1D array of "" f""3 floats. 
Get shape {value.shape}"") idents.append(int(key[-3:])) except ValueError: # and if any conversion fails, simply use arange idents = np.arange(1, len(dig_ch_pos) + 1) for key, ident in zip(dig_ch_pos, idents): dig.append({'r': dig_ch_pos[key], 'ident': int(ident), 'kind': FIFF.FIFFV_POINT_EEG, 'coord_frame': coord_frame}) return _format_dig_points(dig) " 31437,"def bulk_dismiss_alert_command(client: Client, args: dict): """""" Deprecated by: close_false_positive_command """""" alert_ids = args.get('alert_ids') custom_filter = args.get('custom_filter') comment = args.get('comment') request_data = args_to_filter_for_dismiss_and_resolve_alerts(alert_ids, custom_filter, comment) dismissed_alerts_data = {} try: dismissed_alerts_data = client.dismiss_bulk_alerts(request_data) except Exception as e: if 'alertsNotFound' in str(e): raise DemistoException('Error: This alert id is already dismissed or does not exist.') number_of_dismissed_alerts = dismissed_alerts_data['closed_false_positive'] return CommandResults( readable_output=f'{number_of_dismissed_alerts} alerts dismissed', outputs_prefix='MicrosoftCloudAppSecurity.Alerts', outputs_key_field='_id' ) ","def bulk_dismiss_alert_command(client: Client, args: dict): """""" Deprecated: use close_false_positive_command instead """""" alert_ids = args.get('alert_ids') custom_filter = args.get('custom_filter') comment = args.get('comment') request_data = args_to_filter_for_dismiss_and_resolve_alerts(alert_ids, custom_filter, comment) dismissed_alerts_data = {} try: dismissed_alerts_data = client.dismiss_bulk_alerts(request_data) except Exception as e: if 'alertsNotFound' in str(e): raise DemistoException('Error: This alert id is already dismissed or does not exist.') number_of_dismissed_alerts = dismissed_alerts_data['closed_false_positive'] return CommandResults( readable_output=f'{number_of_dismissed_alerts} alerts dismissed', outputs_prefix='MicrosoftCloudAppSecurity.Alerts', outputs_key_field='_id' ) " 27660,"def _spike_test(stream, percent=0.99, multiplier=1e7): """""" Check for very large spikes in data and raise an error if found. :param stream: Stream to look for spikes in. :type stream: :class:`obspy.core.stream.Stream` :param percent: Percentage as a decimal to calculate range for. :type percent: float :param multiplier: Multiplier of range to define a spike. :type multiplier: float """""" list_ids = [] for tr in stream: if (tr.data > 2 * np.max(np.sort( np.abs(tr.data))[0:int(percent * len(tr.data))] ) * multiplier).sum() > 0: list_ids.append(tr.id) if list_ids != []: ids = ', '.join(list_ids) msg = ('Spikes above ' + str(multiplier) + ' of the range of ' + str(percent) + ' of the data present, check:\n' + ids + '.\n' 'This would otherwise likely result in an issue during ' + 'FFT prior to cross-correlation.\n' + 'If you think this spike is real please report ' + 'this as a bug.') print(msg) for ID in list_ids: stream.remove(stream.select(id=ID)[0]) print('%s got removed by EQcorrscan because it had spike' % ID) ","def _spike_test(stream, percent=0.99, multiplier=1e7): """""" Check for very large spikes in data and raise an error if found. :param stream: Stream to look for spikes in. :type stream: :class:`obspy.core.stream.Stream` :param percent: Percentage as a decimal to calculate range for. :type percent: float :param multiplier: Multiplier of range to define a spike. 
:type multiplier: float """""" list_ids = [] for tr in stream: if (tr.data > 2 * np.max(np.sort( np.abs(tr.data))[0:int(percent * len(tr.data))] ) * multiplier).sum() > 0: list_ids.append(tr.id) if list_ids != []: ids = ', '.join(list_ids) msg = ('Spikes above ' + str(multiplier) + ' of the range of ' + str(percent) + ' of the data present, check:\n' + ids + '.\n' 'This would otherwise likely result in an issue during ' + 'FFT prior to cross-correlation.\n' + 'If you think this spike is real please report ' + 'this as a bug.') Logger.warning(msg) for ID in list_ids: stream.remove(stream.select(id=ID)[0]) print('%s got removed by EQcorrscan because it had spike' % ID) " 4073,"def resource_find(filename, use_cache=True): '''Search for a resource in the list of paths. Use resource_add_path to add a custom path to the search. By default, results are cached for 60 seconds. This can be disabled using use_cache=False. .. versionchanged:: 2.0.0rc5 A default cache and the `use_cache` parameter were added. ''' if not filename: return found_filename = None if use_cache: found_filename = Cache.get('kv.resourcefind', filename) if found_filename: return found_filename if filename[:8] == 'atlas://': found_filename = filename else: abspath_filename = abspath(filename) if exists(abspath_filename): found_filename = abspath(filename) else: for path in reversed(resource_paths): abspath_filename = abspath(join(path, filename)) if exists(abspath_filename): found_filename = abspath_filename break if not found_filename and filename.startswith(""data:""): found_filename = filename if use_cache: Cache.append('kv.resourcefind', filename, found_filename) return found_filename ","def resource_find(filename, use_cache=True): '''Search for a resource in the list of paths. Use resource_add_path to add a custom path to the search. By default, results are cached for 60 seconds. This can be disabled using use_cache=False. .. versionchanged:: 2.0.0rc5 `use_cache` parameter added and made True by default. ''' if not filename: return found_filename = None if use_cache: found_filename = Cache.get('kv.resourcefind', filename) if found_filename: return found_filename if filename[:8] == 'atlas://': found_filename = filename else: abspath_filename = abspath(filename) if exists(abspath_filename): found_filename = abspath(filename) else: for path in reversed(resource_paths): abspath_filename = abspath(join(path, filename)) if exists(abspath_filename): found_filename = abspath_filename break if not found_filename and filename.startswith(""data:""): found_filename = filename if use_cache: Cache.append('kv.resourcefind', filename, found_filename) return found_filename " 12262,"def rand_super_bcsz(N=2, enforce_tp=True, rank=None, dims=None): """""" Returns a random superoperator drawn from the Bruzda et al ensemble for CPTP maps [BCSZ08]_. Note that due to finite numerical precision, for ranks less than full-rank, zero eigenvalues may become slightly negative, such that the returned operator is not actually completely positive. Parameters ---------- N : int Square root of the dimension of the superoperator to be returned. enforce_tp : bool If True, the trace-preserving condition of [BCSZ08]_ is enforced; otherwise only complete positivity is enforced. rank : int or None Rank of the sampled superoperator. If None, a full-rank superoperator is generated. dims : list Dimensions of quantum object. Used for specifying tensor structure. Default is dims=[[[N],[N]], [[N],[N]]]. 
Returns ------- rho : Qobj A superoperator acting on vectorized dim × dim density operators, sampled from the BCSZ distribution. """""" if dims is not None: # TODO: check! pass else: dims = [[[N],[N]], [[N],[N]]] if rank is None: rank = N**2 if rank > N**2: raise ValueError(""Rank cannot exceed superoperator dimension."") # We use mainly dense matrices here for speed in low # dimensions. In the future, it would likely be better to switch off # between sparse and dense matrices as the dimension grows. # We start with a Ginibre uniform matrix X of the appropriate rank, # and use it to construct a positive semidefinite matrix X X⁺. X = randnz((N**2, rank), norm='ginibre') # Precompute X X⁺, as we'll need it in two different places. XXdag = np.dot(X, X.T.conj()) if enforce_tp: # We do the partial trace over the first index by using dense reshape # operations, so that we can avoid bouncing to a sparse representation # and back. Y = np.einsum('ijik->jk', XXdag.reshape((N, N, N, N))) # Now we have the matrix :math: 'I \otimes Y^{-1/2}', which we can find # by doing the square root and the inverse separately. As a possible # improvement, iterative methods exist to find inverse square root # matrices directly, as this is important in statistics. Z = np.kron( np.eye(N), sqrtm(la.inv(Y)) ) # Finally, we dot everything together and pack it into a Qobj, # marking the dimensions as that of a type=super (that is, # with left and right compound indices, each representing # left and right indices on the underlying Hilbert space). D = Qobj(np.dot(Z, np.dot(XXdag, Z))) else: D = N * Qobj(XXdag / np.trace(XXdag)) D.dims = [ # Left dims [[N], [N]], # Right dims [[N], [N]] ] # Since [BCSZ08] gives a row-stacking Choi matrix, but QuTiP # expects a column-stacking Choi matrix, we must permute the indices. D = D.permute([[1], [0]]) D.dims = dims # Mark that we've made a Choi matrix. D.superrep = 'choi' return sr.to_super(D) ","def rand_super_bcsz(N=2, enforce_tp=True, rank=None, dims=None): """""" Returns a random superoperator drawn from the Bruzda et al ensemble for CPTP maps [BCSZ08]_. Note that due to finite numerical precision, for ranks less than full-rank, zero eigenvalues may become slightly negative, such that the returned operator is not actually completely positive. Parameters ---------- N : int Square root of the dimension of the superoperator to be returned. enforce_tp : bool If True, the trace-preserving condition of [BCSZ08]_ is enforced; otherwise only complete positivity is enforced. rank : int or None Rank of the sampled superoperator. If None, a full-rank superoperator is generated. dims : list Dimensions of quantum object. Used for specifying tensor structure. Default is dims=[[[N],[N]], [[N],[N]]]. Returns ------- rho : Qobj A superoperator acting on vectorized dim × dim density operators, sampled from the BCSZ distribution. """""" if dims is not None: # TODO: check! pass else: dims = [[[N],[N]], [[N],[N]]] if rank is None: rank = N**2 if rank > N**2: raise ValueError(""Rank cannot exceed superoperator dimension."") # We use mainly dense matrices here for speed in low # dimensions. In the future, it would likely be better to switch off # between sparse and dense matrices as the dimension grows. # We start with a Ginibre uniform matrix X of the appropriate rank, # and use it to construct a positive semidefinite matrix X X⁺. X = randnz((N**2, rank), norm='ginibre') # Precompute X X⁺, as we'll need it in two different places. 
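# X was drawn with shape (N**2, rank), so X X⁺ below is an (N**2, N**2)
# positive-semidefinite matrix of rank at most `rank`; the branches below
# either rescale it or apply the trace-preserving correction to obtain the
# Choi matrix D.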
XXdag = np.dot(X, X.T.conj()) if enforce_tp: # We do the partial trace over the first index by using dense reshape # operations, so that we can avoid bouncing to a sparse representation # and back. Y = np.einsum('ijik->jk', XXdag.reshape((N, N, N, N))) # Now we have the matrix :math:`\mathbb{I} \otimes Y^{-1/2}`, which we can find # by doing the square root and the inverse separately. As a possible # improvement, iterative methods exist to find inverse square root # matrices directly, as this is important in statistics. Z = np.kron( np.eye(N), sqrtm(la.inv(Y)) ) # Finally, we dot everything together and pack it into a Qobj, # marking the dimensions as that of a type=super (that is, # with left and right compound indices, each representing # left and right indices on the underlying Hilbert space). D = Qobj(np.dot(Z, np.dot(XXdag, Z))) else: D = N * Qobj(XXdag / np.trace(XXdag)) D.dims = [ # Left dims [[N], [N]], # Right dims [[N], [N]] ] # Since [BCSZ08] gives a row-stacking Choi matrix, but QuTiP # expects a column-stacking Choi matrix, we must permute the indices. D = D.permute([[1], [0]]) D.dims = dims # Mark that we've made a Choi matrix. D.superrep = 'choi' return sr.to_super(D) " 22383,"def create_paramfile(trans, uploaded_datasets): """""" Create the upload tool's JSON ""param"" file. """""" tool_params = [] json_file_path = None for uploaded_dataset in uploaded_datasets: data = uploaded_dataset.data if uploaded_dataset.type == 'composite': # we need to init metadata before the job is dispatched data.init_meta() for meta_name, meta_value in uploaded_dataset.metadata.items(): setattr(data.metadata, meta_name, meta_value) trans.sa_session.add(data) trans.sa_session.flush() params = dict(file_type=uploaded_dataset.file_type, dataset_id=data.dataset.id, dbkey=uploaded_dataset.dbkey, type=uploaded_dataset.type, metadata=uploaded_dataset.metadata, primary_file=uploaded_dataset.primary_file, composite_file_paths=uploaded_dataset.composite_files, composite_files={k: v.__dict__ for k, v in data.datatype.get_composite_files(data).items()}) else: try: is_binary = uploaded_dataset.datatype.is_binary except Exception: is_binary = None try: link_data_only = uploaded_dataset.link_data_only except Exception: link_data_only = 'copy_files' try: uuid_str = uploaded_dataset.uuid except Exception: uuid_str = None try: purge_source = uploaded_dataset.purge_source except Exception: purge_source = True try: user_ftp_dir = os.path.abspath(trans.user_ftp_dir) except Exception: user_ftp_dir = None if user_ftp_dir and uploaded_dataset.path.startswith(user_ftp_dir): uploaded_dataset.type = 'ftp_import' params = dict(file_type=uploaded_dataset.file_type, ext=uploaded_dataset.ext, name=uploaded_dataset.name, dataset_id=data.dataset.id, dbkey=uploaded_dataset.dbkey, type=uploaded_dataset.type, is_binary=is_binary, link_data_only=link_data_only, uuid=uuid_str, to_posix_lines=getattr(uploaded_dataset, ""to_posix_lines"", True), auto_decompress=getattr(uploaded_dataset, ""auto_decompress"", True), purge_source=purge_source, space_to_tab=uploaded_dataset.space_to_tab, run_as_real_user=trans.app.config.external_chown_script is not None, check_content=trans.app.config.check_upload_content, path=uploaded_dataset.path) # TODO: This will have to change when we start bundling inputs. # Also, in_place above causes the file to be left behind since the # user cannot remove it unless the parent directory is writable. 
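# Only chown files that were physically copied (not linked) on behalf of a
# logged-in user, and only when an external chown script is configured.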
if link_data_only == 'copy_files' and trans.user and trans.app.config.external_chown_script: external_chown(uploaded_dataset.path, trans.user.system_user_pwent(trans.app.config.real_system_username), trans.app.config.external_chown_script, description=""uploaded file"") tool_params.append(params) with tempfile.NamedTemporaryFile(mode=""w"", prefix='upload_params_', delete=False) as fh: json_file_path = fh.name dump(tool_params, fh) return json_file_path ","def create_paramfile(trans, uploaded_datasets): """""" Create the upload tool's JSON ""param"" file. """""" tool_params = [] json_file_path = None for uploaded_dataset in uploaded_datasets: data = uploaded_dataset.data if uploaded_dataset.type == 'composite': # we need to init metadata before the job is dispatched data.init_meta() for meta_name, meta_value in uploaded_dataset.metadata.items(): setattr(data.metadata, meta_name, meta_value) trans.sa_session.add(data) trans.sa_session.flush() params = dict(file_type=uploaded_dataset.file_type, dataset_id=data.dataset.id, dbkey=uploaded_dataset.dbkey, type=uploaded_dataset.type, metadata=uploaded_dataset.metadata, primary_file=uploaded_dataset.primary_file, composite_file_paths=uploaded_dataset.composite_files, composite_files={k: v.__dict__ for k, v in data.datatype.get_composite_files(data).items()}) else: try: is_binary = uploaded_dataset.datatype.is_binary except Exception: is_binary = None try: link_data_only = uploaded_dataset.link_data_only except Exception: link_data_only = 'copy_files' try: uuid_str = uploaded_dataset.uuid except Exception: uuid_str = None try: purge_source = uploaded_dataset.purge_source except Exception: purge_source = True try: user_ftp_dir = os.path.abspath(trans.user_ftp_dir) except Exception: user_ftp_dir = None if user_ftp_dir and uploaded_dataset.path.startswith(user_ftp_dir): uploaded_dataset.type = 'ftp_import' params = dict(file_type=uploaded_dataset.file_type, ext=uploaded_dataset.ext, name=uploaded_dataset.name, dataset_id=data.dataset.id, dbkey=uploaded_dataset.dbkey, type=uploaded_dataset.type, is_binary=is_binary, link_data_only=link_data_only, uuid=uuid_str, to_posix_lines=getattr(uploaded_dataset, ""to_posix_lines"", True), auto_decompress=getattr(uploaded_dataset, ""auto_decompress"", True), purge_source=purge_source, space_to_tab=uploaded_dataset.space_to_tab, run_as_real_user=trans.app.config.external_chown_script is not None, check_content=trans.app.config.check_upload_content, path=uploaded_dataset.path) # TODO: This will have to change when we start bundling inputs. # Also, in_place above causes the file to be left behind since the # user cannot remove it unless the parent directory is writable. 
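# Only chown files that were physically copied (not linked) on behalf of a
# logged-in user, and only when an external chown script is configured.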
if link_data_only == 'copy_files' and trans.user and trans.app.config.external_chown_script is not None: external_chown(uploaded_dataset.path, trans.user.system_user_pwent(trans.app.config.real_system_username), trans.app.config.external_chown_script, description=""uploaded file"") tool_params.append(params) with tempfile.NamedTemporaryFile(mode=""w"", prefix='upload_params_', delete=False) as fh: json_file_path = fh.name dump(tool_params, fh) return json_file_path " 30515,"def rasterize_command(): url = demisto.getArg('url') w = demisto.args().get('width', DEFAULT_W).rstrip('px') h = demisto.args().get('height', DEFAULT_H).rstrip('px') r_type = demisto.args().get('type', 'png') wait_time = int(demisto.args().get('wait_time', 0)) page_load = int(demisto.args().get('max_page_load_time', 0)) if not (url.startswith('http')): url = f'http://{url}' filename = f'url.{""pdf"" if r_type == ""pdf"" else ""png""}' # type: ignore proxy_flag = """" if PROXY: proxy_flag = f""--proxy={HTTPS_PROXY if url.startswith('https') else HTTP_PROXY}"" # type: ignore demisto.debug('rasterize proxy settings: ' + proxy_flag) output = rasterize(path=url, r_type=r_type, width=w, height=h, wait_time=wait_time, max_page_load_time=page_load) res = fileResult(filename=filename, data=output) if r_type == 'png': res['Type'] = entryTypes['image'] demisto.results(res) ","def rasterize_command(): url = demisto.getArg('url') w = demisto.args().get('width', DEFAULT_W).rstrip('px') h = demisto.args().get('height', DEFAULT_H).rstrip('px') r_type = demisto.args().get('type', 'png') wait_time = int(demisto.args().get('wait_time', 0)) page_load = int(demisto.args().get('max_page_load_time', DEFAULT_PAGE_LOAD_TIME)) if not (url.startswith('http')): url = f'http://{url}' filename = f'url.{""pdf"" if r_type == ""pdf"" else ""png""}' # type: ignore proxy_flag = """" if PROXY: proxy_flag = f""--proxy={HTTPS_PROXY if url.startswith('https') else HTTP_PROXY}"" # type: ignore demisto.debug('rasterize proxy settings: ' + proxy_flag) output = rasterize(path=url, r_type=r_type, width=w, height=h, wait_time=wait_time, max_page_load_time=page_load) res = fileResult(filename=filename, data=output) if r_type == 'png': res['Type'] = entryTypes['image'] demisto.results(res) " 26218,"def gather_lldp(module, lldpctl_docker_cmd, skip_interface_pattern_list): rc, output, err = module.run_command(lldpctl_docker_cmd) if output: output_dict = {} current_dict = {} lldp_entries = output.split(""\n"") skip_interface_pattern_str = '(?:% s)' % '|'.join(skip_interface_pattern_list) if skip_interface_pattern_list else None for entry in lldp_entries: if entry.startswith('lldp'): path, value = entry.strip().split(""="", 1) path = path.split(""."") if skip_interface_pattern_list and re.match(skip_interface_pattern_str, path[1]): continue path_components, final = path[:-1], path[-1] else: value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: current_dict[path_component] = current_dict.get(path_component, {}) current_dict = current_dict[path_component] current_dict[final] = value return output_dict ","def gather_lldp(module, lldpctl_docker_cmd, skip_interface_pattern_list): rc, output, err = module.run_command(lldpctl_docker_cmd) if output: output_dict = {} current_dict = {} lldp_entries = output.split(""\n"") skip_interface_pattern_str = ""(?:% s)"" % '|'.join(skip_interface_pattern_list) if skip_interface_pattern_list else None for entry in lldp_entries: if entry.startswith('lldp'): path, value = 
entry.strip().split(""="", 1) path = path.split(""."") if skip_interface_pattern_list and re.match(skip_interface_pattern_str, path[1]): continue path_components, final = path[:-1], path[-1] else: value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: current_dict[path_component] = current_dict.get(path_component, {}) current_dict = current_dict[path_component] current_dict[final] = value return output_dict " 2216,"def test_checksubparams_too_many_subsamples(): X, y, w, c = gen_toy_problem_1d() theil_sen = TheilSenRegressor(n_subsamples=101, random_state=0) with pytest.error(ValueError): theil_sen.fit(X, y) ","def test_checksubparams_too_many_subsamples(): X, y, w, c = gen_toy_problem_1d() theil_sen = TheilSenRegressor(n_subsamples=101, random_state=0) with pytest.raises(ValueError): theil_sen.fit(X, y) " 32344,"def main(): try: if demisto.command() == ""fetch-indicators"": fetch_indicators() elif demisto.command() == ""reset-data-stream"": days = demisto.getArg(""reset"") days = int(days) new_date = reset_data_stream(int(days)) demisto.results(new_date) elif demisto.command() == ""test-module"": connect() return_results(""ok"") except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Failed to execute {demisto.command()} command.\nError:\n{str(e)}"") ","def main(): try: if demisto.command() == ""fetch-indicators"": fetch_indicators() elif demisto.command() == ""reset-data-stream"": days = demisto.getArg(""reset"") days = int(days) new_date = reset_data_stream(int(days)) return_results(new_date) elif demisto.command() == ""test-module"": connect() return_results(""ok"") except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Failed to execute {demisto.command()} command.\nError:\n{str(e)}"") " 31395,"def cisco_stealthwatch_get_tag_hourly_traffic_report_command(client: Client, tenant_id: str, tag_id: str) -> CommandResults: """"""Get a tag (called host group on the Stealthwatch API) hourly traffic report Args: client (Client): Cisco Stealthwatch Client tenant_id (str): The id of the tenant to retrieve its information tag_id (str): The id of the tag to retrieve its information Returns: CommandResults: Raw response, outputs and readable outputs """""" response = client.tag_hourly_traffic(tenant_id, tag_id) outputs = [] if response.get('data'): for report in response['data'].get('data', []): report['tag_id'] = tag_id report['tenant_id'] = tenant_id value = report.get('value') report.pop('value') report.update(value) outputs.append(report) headers = ['timestamp', 'inboundByteCount', 'outboundByteCount', 'withinByteCount'] title = f'Hourly Tag Traffic Report (tenant_id: {tenant_id}, tag_id: {tag_id}):' table = tableToMarkdown(title, outputs, headers=headers, removeNull=True) return CommandResults( outputs_prefix='CiscoStealthwatch.TagHourlyTraffic', outputs_key_field=['tag_id', 'tenant_id', 'timestamp'], raw_response=response, outputs=outputs, readable_output=table ) ","def cisco_stealthwatch_get_tag_hourly_traffic_report_command(client: Client, tenant_id: str, tag_id: str) -> CommandResults: """"""Get a tag (called host group on the Stealthwatch API) hourly traffic report Args: client (Client): Cisco Stealthwatch Client tenant_id (str): The id of the tenant to retrieve its information tag_id (str): The id of the tag to retrieve its information Returns: CommandResults: Raw response, outputs and readable outputs """""" response = client.tag_hourly_traffic(tenant_id, tag_id) outputs = [] if 
response.get('data'): for report in response.get('data', {}).get('data', []): report['tag_id'] = tag_id report['tenant_id'] = tenant_id value = report.get('value') report.pop('value') report.update(value) outputs.append(report) headers = ['timestamp', 'inboundByteCount', 'outboundByteCount', 'withinByteCount'] title = f'Hourly Tag Traffic Report (tenant_id: {tenant_id}, tag_id: {tag_id}):' table = tableToMarkdown(title, outputs, headers=headers, removeNull=True) return CommandResults( outputs_prefix='CiscoStealthwatch.TagHourlyTraffic', outputs_key_field=['tag_id', 'tenant_id', 'timestamp'], raw_response=response, outputs=outputs, readable_output=table ) " 57662,"def main(): options = options_handler() username = options.user password = options.password ami_env = options.ami_env git_sha1 = options.git_sha1 conf_path = options.conf secret_conf_path = options.secret branch_name = options.branch ci_build_number = options.build_number servers = determine_servers_urls(ami_env) server_numeric_version = get_server_numeric_version(ami_env) prints_manager = ParallelPrintsManager(1) conf, secret_conf = load_conf_files(conf_path, secret_conf_path) secret_params = secret_conf.get('integrations', []) if secret_conf else [] username = secret_conf.get('username') if not username else username password = secret_conf.get('userPassword') if not password else password if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'): for server in servers: client = demisto_client.configure(base_url=server, username=username, password=password, verify_ssl=False) set_marketplace_gcp_bucket_for_build(client, prints_manager, branch_name, ci_build_number) print('Restarting servers to apply GCS server config ...') ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \ '""sudo systemctl restart demisto""' try: subprocess.check_output( ssh_string.format('ec2-user', server.replace('https://', '')), shell=True) except subprocess.CalledProcessError as exc: print(exc.output) print('Done restarting servers.') tests = conf['tests'] skipped_integrations_conf = conf['skipped_integrations'] all_module_instances = [] filtered_tests, filter_configured, run_all_tests = extract_filtered_tests(is_nightly=options.is_nightly) tests_for_iteration = tests if run_all_tests: # skip test button testing skipped_instance_test_message = 'Not running instance tests when {} is turned on'.format(RUN_ALL_TESTS_FORMAT) prints_manager.add_print_job(skipped_instance_test_message, print_warning, 0) tests_for_iteration = [] elif filter_configured and filtered_tests: tests_for_iteration = [test for test in tests if test.get('playbookID', '') in filtered_tests] tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version, prints_manager) prints_manager.execute_thread_prints(0) # get a list of brand new integrations that way we filter them out to only configure instances # after updating content new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(git_sha1) new_integrations_names, modified_integrations_names = [], [] installed_content_packs_successfully = True if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'): # sleep for one minute before starting to search and install packs to ensure bucket is ready prints_manager.add_print_job('Sleeping for 1 minute...', print_warning, 0) prints_manager.execute_thread_prints(0) sleep(60) if options.nightly: threads_list = [] threads_prints_manager = ParallelPrintsManager(len(servers)) # 
For each server url we install content for thread_index, server_url in enumerate(servers): client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) t = Thread(target=install_all_content_packs, kwargs={'client': client, 'server': server_url, 'prints_manager': threads_prints_manager, 'thread_index': thread_index}) threads_list.append(t) run_threads_list(threads_list) else: # install content packs in every server pack_ids = get_pack_ids_to_install() for server_url in servers: try: client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) search_and_install_packs_and_their_dependencies(pack_ids, client, prints_manager, options.is_nightly) except Exception as exc: prints_manager.add_print_job(str(exc), print_error, 0) prints_manager.execute_thread_prints(0) installed_content_packs_successfully = False if new_integrations_files: new_integrations_names = get_integration_names_from_files(new_integrations_files) new_integrations_names_message = \ 'New Integrations Since Last Release:\n{}\n'.format('\n'.join(new_integrations_names)) prints_manager.add_print_job(new_integrations_names_message, print_warning, 0) if modified_integrations_files: modified_integrations_names = get_integration_names_from_files(modified_integrations_files) modified_integrations_names_message = \ 'Updated Integrations Since Last Release:\n{}\n'.format('\n'.join(modified_integrations_names)) prints_manager.add_print_job(modified_integrations_names_message, print_warning, 0) prints_manager.execute_thread_prints(0) # Each test is a dictionary from Tests/conf.json which may contain the following fields # ""playbookID"", ""integrations"", ""instance_names"", ""timeout"", ""nightly"", ""fromversion"", ""toversion"" # Note that only the ""playbookID"" field is required with all of the others being optional. # Most tests have an ""integrations"" field listing the integration used for that playbook # and sometimes an ""instance_names"" field which is used when there are multiple instances # of an integration that we want to configure with different configuration values. 
Look at # [conf.json](../conf.json) for examples brand_new_integrations = [] for test in tests_for_iteration: testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password, verify_ssl=False) integrations = get_integrations_for_test(test, skipped_integrations_conf) instance_names_conf = test.get('instance_names', []) if not isinstance(instance_names_conf, list): instance_names_conf = [instance_names_conf] integrations_names = [i.get('name') for i in integrations] prints_manager.add_print_job('All Integrations for test ""{}"":'.format(test.get('playbookID')), print_warning, 0) prints_manager.add_print_job(integrations_names, print_warning, 0) new_integrations, modified_integrations, unchanged_integrations, integration_to_status = group_integrations( integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names ) integrations_msg = '\n'.join(['""{}"" - {}'.format(key, val) for key, val in integration_to_status.items()]) prints_manager.add_print_job('{}\n'.format(integrations_msg), print_warning, 0) integrations_to_configure = modified_integrations[:] integrations_to_configure.extend(unchanged_integrations) # set params for new integrations and [modified + unchanged] integrations, then add the new ones # to brand_new_integrations list for later use placeholders_map = {'%%SERVER_HOST%%': servers[0]} new_ints_params_set = set_integration_params(new_integrations, secret_params, instance_names_conf, placeholders_map) ints_to_configure_params_set = set_integration_params(integrations_to_configure, secret_params, instance_names_conf, placeholders_map) if not new_ints_params_set: prints_manager.add_print_job( 'failed setting parameters for integrations ""{}""'.format('\n'.join(new_integrations)), print_error, 0) if not ints_to_configure_params_set: prints_manager.add_print_job( 'failed setting parameters for integrations ""{}""'.format('\n'.join(integrations_to_configure)), print_error, 0) if not (new_ints_params_set and ints_to_configure_params_set): continue prints_manager.execute_thread_prints(0) brand_new_integrations.extend(new_integrations) module_instances = [] for integration in integrations_to_configure: placeholders_map = {'%%SERVER_HOST%%': servers[0]} module_instance = configure_integration_instance(integration, testing_client, prints_manager, placeholders_map) if module_instance: module_instances.append(module_instance) all_module_instances.extend(module_instances) preupdate_fails = set() postupdate_fails = set() preupdate_success = set() postupdate_success = set() # Test all module instances (of modified + unchanged integrations) pre-updating content if all_module_instances: # only print start message if there are instances to configure prints_manager.add_print_job('Start of Instance Testing (""Test"" button) prior to Content Update:', print_warning, 0) else: prints_manager.add_print_job('No integrations to configure for the chosen tests. 
(Pre-update)', print_warning, 0) prints_manager.execute_thread_prints(0) for instance in all_module_instances: testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password, verify_ssl=False) integration_of_instance = instance.get('brand', '') instance_name = instance.get('name', '') msg = 'Testing (""Test"" button) for instance ""{}"" of integration ""{}"".'.format(instance_name, integration_of_instance) prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN) prints_manager.execute_thread_prints(0) # If there is a failure, __test_integration_instance will print it success, _ = __test_integration_instance(testing_client, instance, prints_manager) prints_manager.execute_thread_prints(0) if not success: preupdate_fails.add((instance_name, integration_of_instance)) else: preupdate_success.add((instance_name, integration_of_instance)) if LooseVersion(server_numeric_version) < LooseVersion('6.0.0'): threads_list = [] threads_prints_manager = ParallelPrintsManager(len(servers)) # For each server url we install content for thread_index, server_url in enumerate(servers): client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) t = Thread(target=update_content_on_demisto_instance, kwargs={'client': client, 'server': server_url, 'ami_name': ami_env, 'prints_manager': threads_prints_manager, 'thread_index': thread_index}) threads_list.append(t) run_threads_list(threads_list) # configure instances for new integrations new_integration_module_instances = [] for integration in brand_new_integrations: placeholders_map = {'%%SERVER_HOST%%': servers[0]} new_integration_module_instance = configure_integration_instance(integration, testing_client, prints_manager, placeholders_map) if new_integration_module_instance: new_integration_module_instances.append(new_integration_module_instance) all_module_instances.extend(new_integration_module_instances) # After content upload has completed - test (""Test"" button) integration instances # Test all module instances (of pre-existing AND new integrations) post-updating content if all_module_instances: # only print start message if there are instances to configure prints_manager.add_print_job('Start of Instance Testing (""Test"" button) after the Content Update:', print_warning, 0) else: prints_manager.add_print_job('No integrations to configure for the chosen tests. 
(Post-update)', print_warning, 0) prints_manager.execute_thread_prints(0) for instance in all_module_instances: integration_of_instance = instance.get('brand', '') instance_name = instance.get('name', '') msg = 'Testing (""Test"" button) for instance ""{}"" of integration ""{}"" .'.format(instance_name, integration_of_instance) prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN) prints_manager.execute_thread_prints(0) # If there is a failure, __test_integration_instance will print it success, _ = __test_integration_instance(testing_client, instance, prints_manager) prints_manager.execute_thread_prints(0) if not success: postupdate_fails.add((instance_name, integration_of_instance)) else: postupdate_success.add((instance_name, integration_of_instance)) # reinitialize all clients since their authorization has probably expired by now for server_url in servers: client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) __disable_integrations_instances(client, all_module_instances, prints_manager) prints_manager.execute_thread_prints(0) success = report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success, new_integrations_names, prints_manager) prints_manager.execute_thread_prints(0) if not success or not installed_content_packs_successfully: sys.exit(2) ","def main(): options = options_handler() username = options.user password = options.password ami_env = options.ami_env git_sha1 = options.git_sha1 conf_path = options.conf secret_conf_path = options.secret branch_name = options.branch ci_build_number = options.build_number servers = determine_servers_urls(ami_env) server_numeric_version = get_server_numeric_version(ami_env) prints_manager = ParallelPrintsManager(1) conf, secret_conf = load_conf_files(conf_path, secret_conf_path) secret_params = secret_conf.get('integrations', []) if secret_conf else [] username = secret_conf.get('username') if not username else username password = secret_conf.get('userPassword') if not password else password if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'): for server in servers: client = demisto_client.configure(base_url=server, username=username, password=password, verify_ssl=False) set_marketplace_gcp_bucket_for_build(client, prints_manager, branch_name, ci_build_number) print('Restarting servers to apply GCS server config ...') ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \ '""sudo systemctl restart demisto""' try: subprocess.check_output( ssh_string.format('ec2-user', server.replace('https://', '')), shell=True) except subprocess.CalledProcessError as exc: print(exc.output) print('Done restarting servers.') tests = conf['tests'] skipped_integrations_conf = conf['skipped_integrations'] all_module_instances = [] filtered_tests, filter_configured, run_all_tests = extract_filtered_tests(is_nightly=options.is_nightly) tests_for_iteration = tests if run_all_tests: # skip test button testing skipped_instance_test_message = 'Not running instance tests when {} is turned on'.format(RUN_ALL_TESTS_FORMAT) prints_manager.add_print_job(skipped_instance_test_message, print_warning, 0) tests_for_iteration = [] elif filter_configured and filtered_tests: tests_for_iteration = [test for test in tests if test.get('playbookID', '') in filtered_tests] tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version, prints_manager) prints_manager.execute_thread_prints(0) # get a 
list of brand new integrations that way we filter them out to only configure instances # after updating content new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(git_sha1) new_integrations_names, modified_integrations_names = [], [] installed_content_packs_successfully = True if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'): # sleep for one minute before starting to search and install packs to ensure bucket is ready prints_manager.add_print_job('Sleeping for 1 minute...', print_warning, 0) prints_manager.execute_thread_prints(0) sleep(60) if options.nightly: threads_list = [] threads_prints_manager = ParallelPrintsManager(len(servers)) # For each server url we install content for thread_index, server_url in enumerate(servers): client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) t = Thread(target=install_all_content_packs, kwargs={'client': client, 'host': server_url, 'prints_manager': threads_prints_manager, 'thread_index': thread_index}) threads_list.append(t) run_threads_list(threads_list) else: # install content packs in every server pack_ids = get_pack_ids_to_install() for server_url in servers: try: client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) search_and_install_packs_and_their_dependencies(pack_ids, client, prints_manager, options.is_nightly) except Exception as exc: prints_manager.add_print_job(str(exc), print_error, 0) prints_manager.execute_thread_prints(0) installed_content_packs_successfully = False if new_integrations_files: new_integrations_names = get_integration_names_from_files(new_integrations_files) new_integrations_names_message = \ 'New Integrations Since Last Release:\n{}\n'.format('\n'.join(new_integrations_names)) prints_manager.add_print_job(new_integrations_names_message, print_warning, 0) if modified_integrations_files: modified_integrations_names = get_integration_names_from_files(modified_integrations_files) modified_integrations_names_message = \ 'Updated Integrations Since Last Release:\n{}\n'.format('\n'.join(modified_integrations_names)) prints_manager.add_print_job(modified_integrations_names_message, print_warning, 0) prints_manager.execute_thread_prints(0) # Each test is a dictionary from Tests/conf.json which may contain the following fields # ""playbookID"", ""integrations"", ""instance_names"", ""timeout"", ""nightly"", ""fromversion"", ""toversion"" # Note that only the ""playbookID"" field is required with all of the others being optional. # Most tests have an ""integrations"" field listing the integration used for that playbook # and sometimes an ""instance_names"" field which is used when there are multiple instances # of an integration that we want to configure with different configuration values. 
Look at # [conf.json](../conf.json) for examples brand_new_integrations = [] for test in tests_for_iteration: testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password, verify_ssl=False) integrations = get_integrations_for_test(test, skipped_integrations_conf) instance_names_conf = test.get('instance_names', []) if not isinstance(instance_names_conf, list): instance_names_conf = [instance_names_conf] integrations_names = [i.get('name') for i in integrations] prints_manager.add_print_job('All Integrations for test ""{}"":'.format(test.get('playbookID')), print_warning, 0) prints_manager.add_print_job(integrations_names, print_warning, 0) new_integrations, modified_integrations, unchanged_integrations, integration_to_status = group_integrations( integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names ) integrations_msg = '\n'.join(['""{}"" - {}'.format(key, val) for key, val in integration_to_status.items()]) prints_manager.add_print_job('{}\n'.format(integrations_msg), print_warning, 0) integrations_to_configure = modified_integrations[:] integrations_to_configure.extend(unchanged_integrations) # set params for new integrations and [modified + unchanged] integrations, then add the new ones # to brand_new_integrations list for later use placeholders_map = {'%%SERVER_HOST%%': servers[0]} new_ints_params_set = set_integration_params(new_integrations, secret_params, instance_names_conf, placeholders_map) ints_to_configure_params_set = set_integration_params(integrations_to_configure, secret_params, instance_names_conf, placeholders_map) if not new_ints_params_set: prints_manager.add_print_job( 'failed setting parameters for integrations ""{}""'.format('\n'.join(new_integrations)), print_error, 0) if not ints_to_configure_params_set: prints_manager.add_print_job( 'failed setting parameters for integrations ""{}""'.format('\n'.join(integrations_to_configure)), print_error, 0) if not (new_ints_params_set and ints_to_configure_params_set): continue prints_manager.execute_thread_prints(0) brand_new_integrations.extend(new_integrations) module_instances = [] for integration in integrations_to_configure: placeholders_map = {'%%SERVER_HOST%%': servers[0]} module_instance = configure_integration_instance(integration, testing_client, prints_manager, placeholders_map) if module_instance: module_instances.append(module_instance) all_module_instances.extend(module_instances) preupdate_fails = set() postupdate_fails = set() preupdate_success = set() postupdate_success = set() # Test all module instances (of modified + unchanged integrations) pre-updating content if all_module_instances: # only print start message if there are instances to configure prints_manager.add_print_job('Start of Instance Testing (""Test"" button) prior to Content Update:', print_warning, 0) else: prints_manager.add_print_job('No integrations to configure for the chosen tests. 
(Pre-update)', print_warning, 0) prints_manager.execute_thread_prints(0) for instance in all_module_instances: testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password, verify_ssl=False) integration_of_instance = instance.get('brand', '') instance_name = instance.get('name', '') msg = 'Testing (""Test"" button) for instance ""{}"" of integration ""{}"".'.format(instance_name, integration_of_instance) prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN) prints_manager.execute_thread_prints(0) # If there is a failure, __test_integration_instance will print it success, _ = __test_integration_instance(testing_client, instance, prints_manager) prints_manager.execute_thread_prints(0) if not success: preupdate_fails.add((instance_name, integration_of_instance)) else: preupdate_success.add((instance_name, integration_of_instance)) if LooseVersion(server_numeric_version) < LooseVersion('6.0.0'): threads_list = [] threads_prints_manager = ParallelPrintsManager(len(servers)) # For each server url we install content for thread_index, server_url in enumerate(servers): client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) t = Thread(target=update_content_on_demisto_instance, kwargs={'client': client, 'server': server_url, 'ami_name': ami_env, 'prints_manager': threads_prints_manager, 'thread_index': thread_index}) threads_list.append(t) run_threads_list(threads_list) # configure instances for new integrations new_integration_module_instances = [] for integration in brand_new_integrations: placeholders_map = {'%%SERVER_HOST%%': servers[0]} new_integration_module_instance = configure_integration_instance(integration, testing_client, prints_manager, placeholders_map) if new_integration_module_instance: new_integration_module_instances.append(new_integration_module_instance) all_module_instances.extend(new_integration_module_instances) # After content upload has completed - test (""Test"" button) integration instances # Test all module instances (of pre-existing AND new integrations) post-updating content if all_module_instances: # only print start message if there are instances to configure prints_manager.add_print_job('Start of Instance Testing (""Test"" button) after the Content Update:', print_warning, 0) else: prints_manager.add_print_job('No integrations to configure for the chosen tests. 
(Post-update)', print_warning, 0) prints_manager.execute_thread_prints(0) for instance in all_module_instances: integration_of_instance = instance.get('brand', '') instance_name = instance.get('name', '') msg = 'Testing (""Test"" button) for instance ""{}"" of integration ""{}"" .'.format(instance_name, integration_of_instance) prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN) prints_manager.execute_thread_prints(0) # If there is a failure, __test_integration_instance will print it success, _ = __test_integration_instance(testing_client, instance, prints_manager) prints_manager.execute_thread_prints(0) if not success: postupdate_fails.add((instance_name, integration_of_instance)) else: postupdate_success.add((instance_name, integration_of_instance)) # reinitialize all clients since their authorization has probably expired by now for server_url in servers: client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False) __disable_integrations_instances(client, all_module_instances, prints_manager) prints_manager.execute_thread_prints(0) success = report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success, new_integrations_names, prints_manager) prints_manager.execute_thread_prints(0) if not success or not installed_content_packs_successfully: sys.exit(2) " 36029,"def test_get_by_label(setup_groups): """"""Verify that using the LABEL will retrieve the correct entity."""""" entity_01, entity_02, entity_03 = setup_groups param = GroupParamType() identifier = '{}'.format(entity_01.label) result = param.convert(identifier, None, None) assert result.uuid == entity_01.uuid ","def test_get_by_label(setup_groups): """"""Verify that using the LABEL will retrieve the correct entity."""""" entity_01, entity_02, entity_03 = setup_groups param = GroupParamType() identifier = '{}'.format(entity_01.label) result = param.convert(identifier, None, None) entity_01, _, _ = setup_groups " 42729,"def test_configuration(rotkehlchen_api_server): """"""Test that the configuration endpoint returns the expected information"""""" response = requests.get(api_url_for(rotkehlchen_api_server, ""configurationresource"")) response_json = assert_proper_response_with_result(response) assert 'max_size_in_mb_all_logs' in response_json assert 'max_num_log_files' in response_json assert 'sqlite_instructions' in response_json assert 'sleep_secs' in response_json assert response_json['max_size_in_mb_all_logs']['is_default'] is True assert response_json['max_num_log_files']['is_default'] is True assert response_json['sqlite_instructions']['is_default'] is True assert response_json['sleep_secs']['is_default'] is False ","def test_configuration(rotkehlchen_api_server): """"""Test that the configuration endpoint returns the expected information"""""" response = requests.get(api_url_for(rotkehlchen_api_server, 'configurationresource')) response_json = assert_proper_response_with_result(response) assert 'max_size_in_mb_all_logs' in response_json assert 'max_num_log_files' in response_json assert 'sqlite_instructions' in response_json assert 'sleep_secs' in response_json assert response_json['max_size_in_mb_all_logs']['is_default'] is True assert response_json['max_num_log_files']['is_default'] is True assert response_json['sqlite_instructions']['is_default'] is True assert response_json['sleep_secs']['is_default'] is False " 42344,"def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to 
get a role argument specification. .. note:: Version added: 2.2 :param str role: Simple role name, or fully qualified collection role name, to query. :param str collection: If specified, will be combined with the role name to form a fully qualified collection role name. If this is supplied, the ``role`` param should not be fully qualified. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. 
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_argspec_command(role, collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error ","def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get a role argument specification. .. note:: Version added: 2.2 :param str role: Simple role name, or fully qualified collection role name, to query. :param str collection: If specified, will be combined with the role name to form a fully qualified collection role name. If this is supplied, the ``role`` param should not be fully qualified. :param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. 
:param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. 
''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_argspec_command(role, collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error " 36053,"def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument """"""Check that the instructions dict contains the necessary keywords"""""" instructions_dict = instructions.get_dict() retrieve_files = instructions_dict.get('retrieve_files', None) if retrieve_files is None: errmsg = ( '\n\n' 'no indication of what to do in the instruction node:\n > {}\n' '(to store the files in the repository set retrieve_files=True,\n' 'to copy them to the specified folder on the remote computer,\n' 'set it to False)\n' ) return errmsg.format(instructions.uuid) if not isinstance(retrieve_files, bool): errmsg = ( 'entry for retrieve files inside of instruction node {} must be\n' 'either True or False; instead, it is: {}' ) return errmsg.format(instructions.uuid, retrieve_files) local_files = instructions_dict.get('local_files', None) remote_files = instructions_dict.get('remote_files', None) symlink_files = instructions_dict.get('symlink_files', None) if not any([local_files, remote_files, symlink_files]): errmsg = ( 'no indication of which files to copy were found in the instruction node {}.\n' 'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n' 'These should be lists containing tuples following the pattern:\n' '[ ... (source_node_key, source_relpath, target_relpath) ... ] \n' ) return errmsg.format(instructions.uuid) ","def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument """"""Check that the instructions dict contains the necessary keywords"""""" instructions_dict = instructions.get_dict() retrieve_files = instructions_dict.get('retrieve_files', None) if retrieve_files is None: errmsg = ( '\n\n' f'no indication of what to do in the instruction node: {instructions}\n' '(to store the files in the repository set retrieve_files=True,\n' 'to copy them to the specified folder on the remote computer,\n' 'set it to False)\n' ) return errmsg.format(instructions.uuid) if not isinstance(retrieve_files, bool): errmsg = ( 'entry for retrieve files inside of instruction node {} must be\n' 'either True or False; instead, it is: {}' ) return errmsg.format(instructions.uuid, retrieve_files) local_files = instructions_dict.get('local_files', None) remote_files = instructions_dict.get('remote_files', None) symlink_files = instructions_dict.get('symlink_files', None) if not any([local_files, remote_files, symlink_files]): errmsg = ( 'no indication of which files to copy were found in the instruction node {}.\n' 'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n' 'These should be lists containing tuples following the pattern:\n' '[ ... (source_node_key, source_relpath, target_relpath) ... 
] \n' ) return errmsg.format(instructions.uuid) " 2549,"def precision_at_k_score( y_true, y_score, *, k=1, sample_weight=None, ignore_ties=False ): """"""Compute Precision@K. Calculate precision for the top-K scored labels. In Information Retrieval paradigm, each sample i represents a query, ``y_true[i]`` - relevance indicators per document (relevant/not relevant), and ``y_score[i]`` - predicted scores per document (used for ranking). The top-scored documents are then considered to be ""retrieved"" and being evaluated given their true relevance. This ranking metric returns a high value if relevant documents are ranked high by ``y_score``. Although the metric takes value in [0, 1] interval, the best scoring function (``y_score``) may not achieve precision@k of 1 if the number of positive labels is less than k. Parameters ---------- y_true : ndarray of shape (n_samples, n_labels) True relevance indicators of entities to be ranked. Any non-zero value is treated as positive/relevant. y_score : ndarray of shape (n_samples, n_labels) Target scores, can either be probability estimates, confidence values, or non-thresholded measure of decisions (as returned by ""decision_function"" on some classifiers). k : int, default=1 Only consider the highest k scores in the ranking. sample_weight : ndarray of shape (n_samples,), default=None Sample weights. If `None`, all samples are given the same weight. ignore_ties : bool, default=False Assume that there are no ties in y_score (which is likely to be the case if y_score is continuous) for efficiency gains. Returns ------- precision_at_k : float in [0., 1.] The averaged precision@k for all samples. References ---------- `Wikipedia entry for Precision At K `_ Manning, Christopher D.; Raghavan, Prabhakar; Schütze, Hinrich (2008). Introduction to Information Retrieval. Cambridge University Press. McSherry, F., & Najork, M. (2008, March). Computing information retrieval performance measures efficiently in the presence of tied scores. In European conference on information retrieval (pp. 414-421). Springer, Berlin, Heidelberg. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_at_k_score >>> # we have groud-truth (binary) relevance of some answers to a query: >>> true_relevance = [[0, 1, 0, 1]] >>> # we predict some (relevance) scores for the answers >>> scores = [[0.1, 0.2, 0.3, 0.4]] >>> # we can get the true relevance of the top scored answer (precision@1) >>> precision_at_k_score(true_relevance, scores) 1 >>> # we can get the average true relevance of the top k answers (precision@k) >>> precision_at_k_score(true_relevance, scores, k=3) 0.66... >>> # now we have some ties in our prediction >>> scores = np.asarray([[0, 0, 1, 1]]) >>> # by default ties are averaged, so here we get the average >>> # true relevance of our top predictions >>> precision_at_k_score(true_relevance, scores, k=1) 0.5 >>> # we can choose to ignore ties for faster results, but only >>> # if we know there aren't ties in our scores, otherwise we get >>> # wrong results: >>> precision_at_k_score(true_relevance, scores, k=1, ignore_ties=True) 0.0 """""" y_true = check_array(y_true, ensure_2d=True) if set(np.unique(y_true)) - {0, 1}: raise ValueError( ""Relevance values (y_true) have to be 0 or 1. 
Got {} instead"".format( (set(np.unique(y_true)) - {0, 1}).pop() ) ) y_score = check_array(y_score, ensure_2d=True) check_consistent_length(y_true, y_score, sample_weight) if y_true.shape != y_score.shape: raise ValueError( ""Input matrices have inconsisten shapes: {} vs {}"".format( y_true.shape, y_score.shape ) ) if not isinstance(k, (int, np.integer)) or k < 1 or k >= y_true.shape[1]: raise ValueError( ""Expected k to be an integer from interval [1, {}). Got {} instead"".format( y_true.shape[1], k ) ) if ignore_ties: top_score_index = np.argpartition(-y_score, k)[:, :k] top_scored_labels = y_true[ np.arange(top_score_index.shape[0])[:, np.newaxis], top_score_index ] precision_by_sample = top_scored_labels.mean(axis=1) else: precision_by_sample = [ _tie_averaged_precision_at_k(y_t, y_s, k) for y_t, y_s in zip(y_true, y_score) ] return np.average(precision_by_sample, weights=sample_weight) ","def precision_at_k_score( y_true, y_score, *, k=1, sample_weight=None, ignore_ties=False ): """"""Compute Precision@K. Calculate precision for the top-K scored labels. In Information Retrieval paradigm, each sample i represents a query, ``y_true[i]`` - relevance indicators per document (relevant/not relevant), and ``y_score[i]`` - predicted scores per document (used for ranking). The top-scored documents are then considered to be ""retrieved"" and being evaluated given their true relevance. This ranking metric returns a high value if relevant documents are ranked high by ``y_score``. Although the metric takes value in [0, 1] interval, the best scoring function (``y_score``) may not achieve precision@k of 1 if the number of positive labels is less than k. Parameters ---------- y_true : ndarray of shape (n_samples, n_labels) True relevance indicators of entities to be ranked. Any non-zero value is treated as positive/relevant. y_score : ndarray of shape (n_samples, n_labels) Target scores, can either be probability estimates, confidence values, or non-thresholded measure of decisions (as returned by ""decision_function"" on some classifiers). k : int, default=1 Only consider the highest k scores in the ranking. sample_weight : ndarray of shape (n_samples,), default=None Sample weights. If `None`, all samples are given the same weight. ignore_ties : bool, default=False Assume that there are no ties in y_score (which is likely to be the case if y_score is continuous) for efficiency gains. Returns ------- precision_at_k : float in [0., 1.] The averaged precision@k for all samples. References ---------- `Wikipedia entry for Precision At K `_ Manning, Christopher D.; Raghavan, Prabhakar; Schütze, Hinrich (2008). Introduction to Information Retrieval. Cambridge University Press. McSherry, F., & Najork, M. (2008, March). Computing information retrieval performance measures efficiently in the presence of tied scores. In European conference on information retrieval (pp. 414-421). Springer, Berlin, Heidelberg. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_at_k_score >>> # we have groud-truth (binary) relevance of some answers to a query: >>> true_relevance = [[0, 1, 0, 1]] >>> # we predict some (relevance) scores for the answers >>> scores = [[0.1, 0.2, 0.3, 0.4]] >>> # we can get the true relevance of the top scored answer (precision@1) >>> precision_at_k_score(true_relevance, scores) 1.0 >>> # we can get the average true relevance of the top k answers (precision@k) >>> precision_at_k_score(true_relevance, scores, k=3) 0.66... 
>>> # now we have some ties in our prediction >>> scores = np.asarray([[0, 0, 1, 1]]) >>> # by default ties are averaged, so here we get the average >>> # true relevance of our top predictions >>> precision_at_k_score(true_relevance, scores, k=1) 0.5 >>> # we can choose to ignore ties for faster results, but only >>> # if we know there aren't ties in our scores, otherwise we get >>> # wrong results: >>> precision_at_k_score(true_relevance, scores, k=1, ignore_ties=True) 0.0 """""" y_true = check_array(y_true, ensure_2d=True) if set(np.unique(y_true)) - {0, 1}: raise ValueError( ""Relevance values (y_true) have to be 0 or 1. Got {} instead"".format( (set(np.unique(y_true)) - {0, 1}).pop() ) ) y_score = check_array(y_score, ensure_2d=True) check_consistent_length(y_true, y_score, sample_weight) if y_true.shape != y_score.shape: raise ValueError( ""Input matrices have inconsisten shapes: {} vs {}"".format( y_true.shape, y_score.shape ) ) if not isinstance(k, (int, np.integer)) or k < 1 or k >= y_true.shape[1]: raise ValueError( ""Expected k to be an integer from interval [1, {}). Got {} instead"".format( y_true.shape[1], k ) ) if ignore_ties: top_score_index = np.argpartition(-y_score, k)[:, :k] top_scored_labels = y_true[ np.arange(top_score_index.shape[0])[:, np.newaxis], top_score_index ] precision_by_sample = top_scored_labels.mean(axis=1) else: precision_by_sample = [ _tie_averaged_precision_at_k(y_t, y_s, k) for y_t, y_s in zip(y_true, y_score) ] return np.average(precision_by_sample, weights=sample_weight) " 31168,"def get_successful_and_failed_packs(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict]: """""" Loads the packs_results.json file to get the successful and failed packs dicts Args: packs_results_file_path (str): The path to the file stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE Returns: dict: The successful packs dict dict: The failed packs dict dict: The images data dict """""" if os.path.exists(packs_results_file_path): packs_results_file = load_json(packs_results_file_path) stage = packs_results_file.get(stage, {}) successful_packs_dict = stage.get(BucketUploadFlow.SUCCESSFUL_PACKS, {}) failed_packs_dict = stage.get(BucketUploadFlow.FAILED_PACKS, {}) images_data_dict = stage.get(BucketUploadFlow.IMAGES, {}) return successful_packs_dict, failed_packs_dict, images_data_dict return {}, {}, {} ","def get_successful_and_failed_packs(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict]: """""" Loads the packs_results.json file to get the successful and failed packs together with uploaded images dicts Args: packs_results_file_path (str): The path to the file stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE Returns: dict: The successful packs dict dict: The failed packs dict dict: The images data dict """""" if os.path.exists(packs_results_file_path): packs_results_file = load_json(packs_results_file_path) stage = packs_results_file.get(stage, {}) successful_packs_dict = stage.get(BucketUploadFlow.SUCCESSFUL_PACKS, {}) failed_packs_dict = stage.get(BucketUploadFlow.FAILED_PACKS, {}) images_data_dict = stage.get(BucketUploadFlow.IMAGES, {}) return successful_packs_dict, failed_packs_dict, images_data_dict return {}, {}, {} " 8715,"def test_plugin_load_entrypoint(tmpdir): root = tmpdir.mkdir('loader_mods') mod_file = root.join('file_mod.py') mod_file.write(MOCK_MODULE_CONTENT) # generate setuptools 
Distribution object distrib = pkg_resources.Distribution(root.strpath) sys.path.append(root.strpath) # load the entrypoint try: entrypoint = pkg_resources.EntryPoint( 'test_plugin', 'file_mod', dist=distrib) plugin = plugins.handlers.EntrypointPluginHandler(entrypoint) plugin.load() finally: sys.path.remove(root.strpath) assert plugin.name == 'test_plugin' test_mod = plugin._module assert hasattr(test_mod, 'first_command') assert hasattr(test_mod, 'second_command') assert hasattr(test_mod, 'interval5s') assert hasattr(test_mod, 'interval10s') assert hasattr(test_mod, 'example_url') assert hasattr(test_mod, 'shutdown') assert hasattr(test_mod, 'ignored') ","def test_plugin_load_entrypoint(tmpdir): root = tmpdir.mkdir('loader_mods') mod_file = root.join('file_mod.py') mod_file.write(MOCK_MODULE_CONTENT) # generate setuptools Distribution object distrib = pkg_resources.Distribution(root.strpath) sys.path.append(root.strpath) # load the entrypoint try: entry_point = pkg_resources.EntryPoint( 'test_plugin', 'file_mod', dist=distrib) plugin = plugins.handlers.EntrypointPluginHandler(entrypoint) plugin.load() finally: sys.path.remove(root.strpath) assert plugin.name == 'test_plugin' test_mod = plugin._module assert hasattr(test_mod, 'first_command') assert hasattr(test_mod, 'second_command') assert hasattr(test_mod, 'interval5s') assert hasattr(test_mod, 'interval10s') assert hasattr(test_mod, 'example_url') assert hasattr(test_mod, 'shutdown') assert hasattr(test_mod, 'ignored') " 59203,"def main(): import os import sys import runpy import pstats from optparse import OptionParser usage = ""cProfile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."" parser = OptionParser(usage=usage) parser.allow_interspersed_args = False parser.add_option('-o', '--outfile', dest=""outfile"", help=""Save stats to "", default=None) parser.add_option('-s', '--sort', dest=""sort"", help=""Sort order when printing to stdout, based on pstats.Stats class"", default=-1, choices=sorted(pstats.Stats.sort_arg_dict_default)) parser.add_option('-m', dest=""module"", action=""store_true"", help=""Profile a library module"", default=False) if not sys.argv[1:]: parser.print_usage() sys.exit(2) (options, args) = parser.parse_args() sys.argv[:] = args # the script that we're profiling may chdir, so capture the absolute path # to the output file at startup if options.outfile is not None: options.outfile = os.path.abspath(options.outfile) if len(args) > 0: if options.module: code = ""run_module(modname, run_name='__main__')"" globs = { 'run_module': runpy.run_module, 'modname': args[0] } else: progname = args[0] sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') globs = { '__file__': progname, '__name__': '__main__', '__package__': None, '__cached__': None, } runctx(code, globs, None, options.outfile, options.sort) else: parser.print_usage() return parser ","def main(): import os import sys import runpy import pstats from optparse import OptionParser usage = ""cProfile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."" parser = OptionParser(usage=usage) parser.allow_interspersed_args = False parser.add_option('-o', '--outfile', dest=""outfile"", help=""Save stats to "", default=None) parser.add_option('-s', '--sort', dest=""sort"", help=""Sort order when printing to stdout, based on pstats.Stats class"", default=-1, choices=sorted(pstats.Stats.sort_arg_dict_default)) parser.add_option('-m', dest=""module"", 
action=""store_true"", help=""Profile a library module"", default=False) if not sys.argv[1:]: parser.print_usage() sys.exit(2) (options, args) = parser.parse_args() sys.argv[:] = args # The script that we're profiling may chdir, so capture the absolute path # to the output file at startup. if options.outfile is not None: options.outfile = os.path.abspath(options.outfile) if len(args) > 0: if options.module: code = ""run_module(modname, run_name='__main__')"" globs = { 'run_module': runpy.run_module, 'modname': args[0] } else: progname = args[0] sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') globs = { '__file__': progname, '__name__': '__main__', '__package__': None, '__cached__': None, } runctx(code, globs, None, options.outfile, options.sort) else: parser.print_usage() return parser " 10919,"def get_os_version(): """"""Determine system version."""""" # platform.dist was removed in Python 3.8 if hasattr(platform, 'dist'): os_version = platform.dist()[1] elif HAVE_DISTRO: os_version = distro.version() elif os.path.exists(ETC_OS_RELEASE): os_release_txt = read_file(ETC_OS_RELEASE) version_regex = re.compile('^VERSION=""?(?P[^""\n]+)""?$', re.M) res = version_regex.search(os_release_txt) if res: os_version = res.group('version') else: # VERSION may not always be defined (for example on Gentoo), # fall back to VERSION_ID in that case version_regex = re.compile('^VERSION_ID=""?(?P[^""\n]+)""?$', re.M) res = version_regex.search(os_release_txt) if res: os_version = res.group('version') else: os_version = None if os_version: if get_os_name() in [""suse"", ""SLES""]: # SLES subversions can only be told apart based on kernel version, # see http://wiki.novell.com/index.php/Kernel_versions version_suffixes = { '11': [ ('2.6.27', ''), ('2.6.32', '_SP1'), ('3.0.101-63', '_SP4'), # not 100% correct, since early SP3 had 3.0.76 - 3.0.93, but close enough? 
('3.0.101', '_SP3'), # SP2 kernel versions range from 3.0.13 - 3.0.101 ('3.0', '_SP2'), ], '12': [ ('3.12.28', ''), ('3.12.49', '_SP1'), ], } # append suitable suffix to system version if os_version in version_suffixes.keys(): kernel_version = platform.uname()[2] known_sp = False for (kver, suff) in version_suffixes[os_version]: if kernel_version.startswith(kver): os_version += suff known_sp = True break if not known_sp: suff = '_UNKNOWN_SP' else: raise EasyBuildError(""Don't know how to determine subversions for SLES %s"", os_version) return os_version else: return UNKNOWN ","def get_os_version(): """"""Determine system version."""""" # platform.dist was removed in Python 3.8 if hasattr(platform, 'dist'): os_version = platform.dist()[1] if HAVE_DISTRO and not os_version: os_version = distro.version() elif os.path.exists(ETC_OS_RELEASE): os_release_txt = read_file(ETC_OS_RELEASE) version_regex = re.compile('^VERSION=""?(?P[^""\n]+)""?$', re.M) res = version_regex.search(os_release_txt) if res: os_version = res.group('version') else: # VERSION may not always be defined (for example on Gentoo), # fall back to VERSION_ID in that case version_regex = re.compile('^VERSION_ID=""?(?P[^""\n]+)""?$', re.M) res = version_regex.search(os_release_txt) if res: os_version = res.group('version') else: os_version = None if os_version: if get_os_name() in [""suse"", ""SLES""]: # SLES subversions can only be told apart based on kernel version, # see http://wiki.novell.com/index.php/Kernel_versions version_suffixes = { '11': [ ('2.6.27', ''), ('2.6.32', '_SP1'), ('3.0.101-63', '_SP4'), # not 100% correct, since early SP3 had 3.0.76 - 3.0.93, but close enough? ('3.0.101', '_SP3'), # SP2 kernel versions range from 3.0.13 - 3.0.101 ('3.0', '_SP2'), ], '12': [ ('3.12.28', ''), ('3.12.49', '_SP1'), ], } # append suitable suffix to system version if os_version in version_suffixes.keys(): kernel_version = platform.uname()[2] known_sp = False for (kver, suff) in version_suffixes[os_version]: if kernel_version.startswith(kver): os_version += suff known_sp = True break if not known_sp: suff = '_UNKNOWN_SP' else: raise EasyBuildError(""Don't know how to determine subversions for SLES %s"", os_version) return os_version else: return UNKNOWN " 40159,"def setup_anndata( adata, batch_key: str = None, labels_key: str = None, X_layers_key: str = None, copy: bool = False, ): """"""Sets up anndata object for scVI models. This method will compute the log mean and log variance per batch. A mapping will be created between in Parameters ---------- adata anndata object containing raw counts batch_key key in adata.obs for batch information. Will automatically be converted into integer categories labels_key key in adata.obs for label information. Will automatically be converted into integer categories X_layers_key if not None, uses this as the key in adata.layers for raw count copy if True, a copy of anndata is returned Returns ------- """""" if copy: adata = adata.copy() ###checking layers if X_layers_key is None: if check_nonnegative_integers(adata.X) is False: logger.warning( ""adata.X does not contain unnormalized count data. Are you sure this is what you want?"" ) logger.info(""Using data from adata.X"") else: assert ( X_layers_key in adata.layers.keys() ), ""{} is not a valid key in adata.layers"".format(X_layers_key) if check_nonnegative_integers(adata.layers[X_layers_key]) is False: logger.warning( 'adata.layers[""{}""] does not contain unnormalized count data. 
Are you sure this is what you want?'.format( X_layers_key ) ) logger.info('Using data from adata.layers[""{}""]'.format(X_layers_key)) ###checking batch if batch_key is None: logger.info(""No batch_key inputted, assuming all cells are same batch"") batch_key = ""_scvi_batch"" adata.obs[batch_key] = np.zeros(adata.shape[0]) else: assert ( batch_key in adata.obs.keys() ), ""{} is not a valid key in adata.obs"".format(batch_key) logger.info('Using batches from adata.obs[""{}""]'.format(batch_key)) # check the datatype of batches. if theyre not integers, make them ints user_batch_dtype = adata.obs[batch_key].dtype if np.issubdtype(user_batch_dtype, np.integer) is False: adata.obs[""_scvi_batch""] = adata.obs[batch_key].astype(""category"").cat.codes batch_key = ""_scvi_batch"" if labels_key is None: logger.info(""No label_key inputted, assuming all cells have same label"") labels_key = ""_scvi_labels"" adata.obs[labels_key] = np.zeros(adata.shape[0]) else: assert ( labels_key in adata.obs.keys() ), ""{} is not a valid key for in adata.obs"".format(labels_key) logger.info('Using labels from adata.obs[""{}""]'.format(labels_key)) # check the datatype of labels. if theyre not integers, make them ints user_labels_dtype = adata.obs[labels_key].dtype if np.issubdtype(user_labels_dtype, np.integer) is False: adata.obs[""_scvi_labels""] = adata.obs[labels_key].astype(""category"").cat.codes labels_key = ""_scvi_labels"" # computes the library size per batch local_l_mean_key = ""_scvi_local_l_mean"" local_l_var_key = ""_scvi_local_l_var"" logger.info(""Calculating log mean and log variance per batch"") compute_library_size_batch( adata, batch_key=batch_key, local_l_mean_key=local_l_mean_key, local_l_var_key=local_l_var_key, X_layers_key=X_layers_key, ) if X_layers_key is None: X_loc = None X_key = ""X"" else: X_loc = ""layers"" X_key = X_layers_key data_registry = { X_KEY: (X_loc, X_key), BATCH_KEY: (""obs"", batch_key), LOCAL_L_MEAN_KEY: (""obs"", local_l_mean_key), LOCAL_L_VAR_KEY: (""obs"", local_l_var_key), LABELS_KEY: (""obs"", labels_key), } register_anndata(adata, data_registry_dict=data_registry) n_batch = len(np.unique(adata.obs[batch_key])) n_cells = adata.shape[0] n_genes = adata.shape[1] summary_stats = {""n_batch"": n_batch, ""n_cells"": n_cells, ""n_genes"": n_genes} logger.info( ""Successfully registered anndata object containing {} cells, {} genes, and {} batches \nRegistered keys:{}"".format( n_cells, n_genes, n_batch, list(data_registry.keys()) ) ) adata.uns[""scvi_summary_stats""] = summary_stats if copy: return adata ","def setup_anndata( adata, batch_key: str = None, labels_key: str = None, X_layers_key: str = None, copy: bool = False, ): """"""Sets up anndata object for scVI models. This method will compute the log mean and log variance per batch. A mapping will be created between in Parameters ---------- adata anndata object containing raw counts batch_key key in adata.obs for batch information. Will automatically be converted into integer categories labels_key key in adata.obs for label information. Will automatically be converted into integer categories X_layers_key if not None, uses this as the key in adata.layers for raw count copy if True, a copy of anndata is returned Returns ------- """""" if copy: adata = adata.copy() ###checking layers if X_layers_key is None: if check_nonnegative_integers(adata.X) is False: logger.warning( ""adata.X does not contain unnormalized count data. 
Are you sure this is what you want?"" ) logger.info(""Using data from adata.X"") else: assert ( X_layers_key in adata.layers.keys() ), ""{} is not a valid key in adata.layers"".format(X_layers_key) if check_nonnegative_integers(adata.layers[X_layers_key]) is False: logger.warning( 'adata.layers[""{}""] does not contain unnormalized count data. Are you sure this is what you want?'.format( X_layers_key ) ) logger.info('Using data from adata.layers[""{}""]'.format(X_layers_key)) ###checking batch if batch_key is None: logger.info(""No batch_key inputted, assuming all cells are same batch"") batch_key = ""_scvi_batch"" adata.obs[batch_key] = np.zeros(adata.shape[0]) else: assert ( batch_key in adata.obs.keys() ), ""{} is not a valid key in adata.obs"".format(batch_key) logger.info('Using batches from adata.obs[""{}""]'.format(batch_key)) # check the datatype of batches. if theyre not integers, make them ints user_batch_dtype = adata.obs[batch_key].dtype if np.issubdtype(user_batch_dtype, np.integer) is False: adata.obs[""_scvi_batch""] = adata.obs[batch_key].astype(""category"").cat.codes batch_key = ""_scvi_batch"" if labels_key is None: logger.info(""No label_key inputted, assuming all cells have same label"") labels_key = ""_scvi_labels"" adata.obs[labels_key] = np.zeros(adata.shape[0]) else: assert ( labels_key in adata.obs.keys() ), ""{} is not a valid key for in adata.obs"".format(labels_key) logger.info('Using labels from adata.obs[""{}""]'.format(labels_key)) # check the datatype of labels. if theyre not integers, make them ints user_labels_dtype = adata.obs[labels_key].dtype if np.issubdtype(user_labels_dtype, np.integer) is False: adata.obs[""_scvi_labels""] = adata.obs[labels_key].astype(""category"").cat.codes labels_key = ""_scvi_labels"" # computes the library size per batch local_l_mean_key = ""_scvi_local_l_mean"" local_l_var_key = ""_scvi_local_l_var"" logger.info(""Computing library size prior per batch"") compute_library_size_batch( adata, batch_key=batch_key, local_l_mean_key=local_l_mean_key, local_l_var_key=local_l_var_key, X_layers_key=X_layers_key, ) if X_layers_key is None: X_loc = None X_key = ""X"" else: X_loc = ""layers"" X_key = X_layers_key data_registry = { X_KEY: (X_loc, X_key), BATCH_KEY: (""obs"", batch_key), LOCAL_L_MEAN_KEY: (""obs"", local_l_mean_key), LOCAL_L_VAR_KEY: (""obs"", local_l_var_key), LABELS_KEY: (""obs"", labels_key), } register_anndata(adata, data_registry_dict=data_registry) n_batch = len(np.unique(adata.obs[batch_key])) n_cells = adata.shape[0] n_genes = adata.shape[1] summary_stats = {""n_batch"": n_batch, ""n_cells"": n_cells, ""n_genes"": n_genes} logger.info( ""Successfully registered anndata object containing {} cells, {} genes, and {} batches \nRegistered keys:{}"".format( n_cells, n_genes, n_batch, list(data_registry.keys()) ) ) adata.uns[""scvi_summary_stats""] = summary_stats if copy: return adata " 1702,"def plot_confusion_matrix(estimator, X, y_true, labels=None, sample_weight=None, normalize=None, display_labels=None, include_values=True, xticks_rotation='horizontal', values_format=None, cmap='viridis', ax=None): """"""Plot Confusion Matrix. Read more in the :ref:`User Guide `. Parameters ---------- estimator : estimator instance Trained classifier. X : {array-like, sparse matrix} of shape (n_samples, n_features) Input values. y : array-like of shape (n_samples,) Target values. labels : array-like of shape (n_classes,), default=None List of labels to index the matrix. This may be used to reorder or select a subset of labels. 
If `None` is given, those that appear at least once in `y_true` or `y_pred` are used in sorted order. sample_weight : array-like of shape (n_samples,), default=None Sample weights. normalize : {'true', 'pred', 'all'}, default=None Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. display_labels : array-like of shape (n_classes,), default=None Target names used for plotting. By default, `labels` will be used if it is defined, otherwise the unique labels of `y_true` and `y_pred` will be used. include_values : bool, default=True Includes values in confusion matrix. xticks_rotation : {'vertical', 'horizontal'} or float, \ default='horizontal' Rotation of xtick labels. values_format : str, default=None Format specification for values in confusion matrix. If `None`, the format specification is '.2g'. cmap : str or matplotlib Colormap, default='viridis' Colormap recognized by matplotlib. ax : matplotlib Axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. Returns ------- display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` Examples -------- >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import plot_confusion_matrix >>> from sklearn.model_selection import train_test_split >>> from sklearn.svm import SVC >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> clf = SVC(random_state=0) >>> clf.fit(X_train, y_train) >>> plot_confusion_matrix(clf, X_test, y_test) >>> plt.show() """""" check_matplotlib_support(""plot_confusion_matrix"") if not is_classifier(estimator): raise ValueError(""plot_confusion_matrix only supports classifiers"") y_pred = estimator.predict(X) cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, normalize=normalize) if display_labels is None: if labels is None: display_labels = estimator.classes_ else: display_labels = labels disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels) return disp.plot(include_values=include_values, cmap=cmap, ax=ax, xticks_rotation=xticks_rotation, values_format=values_format) ","def plot_confusion_matrix(estimator, X, y_true, labels=None, sample_weight=None, normalize=None, display_labels=None, include_values=True, xticks_rotation='horizontal', values_format=None, cmap='viridis', ax=None): """"""Plot Confusion Matrix. Read more in the :ref:`User Guide `. Parameters ---------- estimator : estimator instance Trained classifier. X : {array-like, sparse matrix} of shape (n_samples, n_features) Input values. y : array-like of shape (n_samples,) Target values. labels : array-like of shape (n_classes,), default=None List of labels to index the matrix. This may be used to reorder or select a subset of labels. If `None` is given, those that appear at least once in `y_true` or `y_pred` are used in sorted order. sample_weight : array-like of shape (n_samples,), default=None Sample weights. normalize : {'true', 'pred', 'all'}, default=None Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. display_labels : array-like of shape (n_classes,), default=None Target names used for plotting. By default, `labels` will be used if it is defined, otherwise the unique labels of `y_true` and `y_pred` will be used. 
include_values : bool, default=True Includes values in confusion matrix. xticks_rotation : {'vertical', 'horizontal'} or float, \ default='horizontal' Rotation of xtick labels. values_format : str, default=None Format specification for values in confusion matrix. If `None`, the format specification is '.2g'. cmap : str or matplotlib Colormap, default='viridis' Colormap recognized by matplotlib. ax : matplotlib Axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. Returns ------- display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` Examples -------- >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import plot_confusion_matrix >>> from sklearn.model_selection import train_test_split >>> from sklearn.svm import SVC >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> clf = SVC(random_state=0) >>> clf.fit(X_train, y_train) >>> plot_confusion_matrix(clf, X_test, y_test) >>> plt.show() # doctest: +SKIP """""" check_matplotlib_support(""plot_confusion_matrix"") if not is_classifier(estimator): raise ValueError(""plot_confusion_matrix only supports classifiers"") y_pred = estimator.predict(X) cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, normalize=normalize) if display_labels is None: if labels is None: display_labels = estimator.classes_ else: display_labels = labels disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels) return disp.plot(include_values=include_values, cmap=cmap, ax=ax, xticks_rotation=xticks_rotation, values_format=values_format) " 59828,"def _return_existing_view(flags: int, open_file_group: int, active_group: int, specified_group: int) -> bool: open_side_by_side = bool(flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)) file_in_active_group = open_file_group == active_group # true if DONT want to force it to open in the active group # i.e. just jump to the file select_file = not bool(flags & sublime.FORCE_GROUP) if specified_group > -1: return False if open_side_by_side: # default is -1... and is ignored # always open in the specific group if pass in return False elif file_in_active_group: return True else: return select_file ","def _return_existing_view(flags: int, open_file_group: int, active_group: int, specified_group: int) -> bool: if specified_group > -1: return False open_side_by_side = bool(flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)) if open_side_by_side: # default is -1... and is ignored. always open in the specific group if pass in return False file_in_active_group = open_file_group == active_group if file_in_active_group: return True # true if DONT want to force it to open in the active group i.e. 
just jump to the file return not bool(flags & sublime.FORCE_GROUP) " 10892,"def updateSetup(test): zc.buildout.testing.buildoutSetUp(test) new_releases = test.globs['tmpdir']('new_releases') test.globs['new_releases'] = new_releases ws = getWorkingSetWithBuildoutEgg(test) # now let's make the new releases # TOD0 enable new releases of pip wheel setuptools # when eggs enforced makeNewRelease('zc.buildout', ws, new_releases) os.mkdir(os.path.join(new_releases, 'zc.buildout')) ","def updateSetup(test): zc.buildout.testing.buildoutSetUp(test) new_releases = test.globs['tmpdir']('new_releases') test.globs['new_releases'] = new_releases ws = getWorkingSetWithBuildoutEgg(test) # now let's make the new releases # TODO enable new releases of pip wheel setuptools # when eggs enforced makeNewRelease('zc.buildout', ws, new_releases) os.mkdir(os.path.join(new_releases, 'zc.buildout')) " 16116,"def get_type(cloud_id, install_code): """"""Try API call 'get_network_info' to see if target device is Legacy or Eagle-200."""""" reader = FixedLegacyReader(cloud_id, install_code) try: response = reader.get_network_info() except UPDATE_ERRORS as error: _LOGGER.error(""Failed to connect during setup: %s"", error) raise CannotConnect from error # Branch to test if target is Legacy Model if ( ""NetworkInfo"" in response and response[""NetworkInfo""].get(""ModelId"", None) == ""Z109-EAGLE"" ): return TYPE_LEGACY # Branch to test if target is Eagle-200 Model if ( ""Response"" in response and response[""Response""].get(""Command"", None) == ""get_network_info"" ): return TYPE_EAGLE_200 # Catch-all if hardware ID tests fail return None ","def get_type(cloud_id, install_code): """"""Try API call 'get_network_info' to see if target device is Legacy or Eagle-200."""""" reader = FixedLegacyReader(cloud_id, install_code) try: response = reader.get_network_info() except UPDATE_ERRORS as error: _LOGGER.error(""Failed to connect during setup: %s"", error) raise CannotConnect from error # Branch to test if target is Legacy Model if ( ""NetworkInfo"" in response and response[""NetworkInfo""].get(""ModelId"", None) == ""Z109-EAGLE"" ): return TYPE_LEGACY # Branch to test if target is Eagle-200 Model if ( ""Response"" in response and response[""Response""].get(""Command"") == ""get_network_info"" ): return TYPE_EAGLE_200 # Catch-all if hardware ID tests fail return None " 8596,"def test_loads_hex(some_point): assert loads(dumps(some_point, hex=True), hex=True), some_point ","def test_loads_hex(some_point): assert loads(dumps(some_point, hex=True), hex=True) == some_point " 35189,"def pinv(a, rcond=1e-15): """"""Compute the Moore-Penrose pseudoinverse of a matrix. It computes a pseudoinverse of a matrix ``a``, which is a generalization of the inverse matrix with Singular Value Decomposition (SVD). Note that it automatically removes small singular values for stability. Args: a (cupy.ndarray): The matrix with dimension ``(..., M, N)`` rcond (float or cupy.ndarray): Cutoff parameter for small singular values. For stability it computes the largest singular value denoted by ``s``, and sets all singular values smaller than ``s`` to zero. Broadcasts against the stack of matrices. Returns: cupy.ndarray: The pseudoinverse of ``a`` with dimension ``(..., N, M)``. .. warning:: This function calls one or more cuSOLVER routine(s) which may yield invalid results if input conditions are not met. 
To detect these invalid results, you can set the `linalg` configuration to a value that is not `ignore` in :func:`cupyx.errstate` or :func:`cupyx.seterr`. .. seealso:: :func:`numpy.linalg.pinv` """""" if a.size == 0: m, n = a.shape[-2:] return cupy.empty(a.shape[:-2] + (n, m), dtype=a.dtype) u, s, vt = _decomposition.svd(a.conj(), full_matrices=False) # discard small singular values if cupy.isscalar(rcond): rcond = cupy.asarray(rcond) cutoff = rcond[..., None] * cupy.amax(s, axis=-1, keepdims=True) leq = s <= cutoff s = 1 / s s[leq] = 0 return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1)) ","def pinv(a, rcond=1e-15): """"""Compute the Moore-Penrose pseudoinverse of a matrix. It computes a pseudoinverse of a matrix ``a``, which is a generalization of the inverse matrix with Singular Value Decomposition (SVD). Note that it automatically removes small singular values for stability. Args: a (cupy.ndarray): The matrix with dimension ``(..., M, N)`` rcond (float or cupy.ndarray): Cutoff parameter for small singular values. For stability it computes the largest singular value denoted by ``s``, and sets all singular values smaller than ``s`` to zero. Broadcasts against the stack of matrices. Returns: cupy.ndarray: The pseudoinverse of ``a`` with dimension ``(..., N, M)``. .. warning:: This function calls one or more cuSOLVER routine(s) which may yield invalid results if input conditions are not met. To detect these invalid results, you can set the `linalg` configuration to a value that is not `ignore` in :func:`cupyx.errstate` or :func:`cupyx.seterr`. .. seealso:: :func:`numpy.linalg.pinv` """""" if a.size == 0: m, n = a.shape[-2:] return cupy.empty(a.shape[:-2] + (n, m), dtype=a.dtype) u, s, vt = _decomposition.svd(a.conj(), full_matrices=False) # discard small singular values if cupy.isscalar(rcond): rcond = cupy.asarray(rcond) cutoff = rcond[..., None] * cupy.amax(s, axis=-1, keepdims=True) leq = s <= cutoff cupy.reciprocal(s, out=s) s[leq] = 0 return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1)) " 41484,"def plot_results(ax, mutests, tests, test_size=0.05): cls_obs = np.array([test[0] for test in tests]).flatten() cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)] ax.plot(mutests, cls_obs, c='k') for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']): ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed') ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='y') ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='g') ax.plot(mutests, [test_size] * len(mutests), c='r') ax.set_ylim(0, 1) ","def plot_results(ax, mutests, tests, test_size=0.05): cls_obs = np.array([test[0] for test in tests]).flatten() cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)] ax.plot(mutests, cls_obs, c='black') for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']): ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed') ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='y') ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='g') ax.plot(mutests, [test_size] * len(mutests), c='r') ax.set_ylim(0, 1) " 23745,"def unicode_argv(): if iswindows: # Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode # strings. # Versions 2.x of Python don't support Unicode in sys.argv on # Windows, with the underlying Windows API instead replacing multi-byte # characters with '?'. 
from ctypes import POINTER, byref, cdll, c_int, windll from ctypes.wintypes import LPCWSTR, LPWSTR GetCommandLineW = cdll.kernel32.GetCommandLineW GetCommandLineW.argtypes = [] GetCommandLineW.restype = LPCWSTR CommandLineToArgvW = windll.shell32.CommandLineToArgvW CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)] CommandLineToArgvW.restype = POINTER(LPWSTR) cmd = GetCommandLineW() argc = c_int(0) argv = CommandLineToArgvW(cmd, byref(argc)) if argc.value > 0: # Remove Python executable and commands if present start = argc.value - len(sys.argv) return [argv[i] for i in range(start, argc.value)] return [u""ineptepub.py""] else: argvencoding = sys.stdin.encoding if argvencoding == None: argvencoding = ""utf-8"" return [arg if (type(arg) == six.text_type) else six.text_type(arg,argvencoding) for arg in sys.argv] ","def unicode_argv(): if iswindows: # Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode # strings. # Versions 2.x of Python don't support Unicode in sys.argv on # Windows, with the underlying Windows API instead replacing multi-byte # characters with '?'. from ctypes import POINTER, byref, cdll, c_int, windll from ctypes.wintypes import LPCWSTR, LPWSTR GetCommandLineW = cdll.kernel32.GetCommandLineW GetCommandLineW.argtypes = [] GetCommandLineW.restype = LPCWSTR CommandLineToArgvW = windll.shell32.CommandLineToArgvW CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)] CommandLineToArgvW.restype = POINTER(LPWSTR) cmd = GetCommandLineW() argc = c_int(0) argv = CommandLineToArgvW(cmd, byref(argc)) if argc.value > 0: # Remove Python executable and commands if present start = argc.value - len(sys.argv) return [argv[i] for i in range(start, argc.value)] return [u""ineptepub.py""] else: argvencoding = sys.stdin.encoding if argvencoding == None: argvencoding = ""utf-8"" return [arg if isinstance(arg, six.text_type) else six.text_type(arg,argvencoding) for arg in sys.argv] " 23151,"def read_orc( path, engine=""pyarrow"", columns=None, index=None, split_stripes=1, aggregate_files=None, storage_options=None, ): """"""Read dataframe from ORC file(s) Parameters ---------- path: str or list(str) Location of file(s), which can be a full URL with protocol specifier, and may include glob character if a single string. engine: 'pyarrow' or ORCEngine Backend ORC engine to use for IO. Default is ""pyarrow"". columns: None or list(str) Columns to load. If None, loads all. index: str Column name to set as index. split_stripes: int or False Maximum number of ORC stripes to include in each output-DataFrame partition. Use False to specify a 1-to-1 mapping between files and partitions. Default is 1. aggregate_files : bool, default False Whether distinct file paths may be aggregated into the same output partition. A setting of True means that any two file paths may be aggregated into the same output partition, while False means that inter-file aggregation is prohibited. storage_options: None or dict Further parameters to pass to the bytes backend. Returns ------- Dask.DataFrame (even if there is only one column) Examples -------- >>> df = dd.read_orc('https://github.com/apache/orc/raw/' ... 'master/examples/demo-11-zlib.orc') # doctest: +SKIP """""" # Get engine engine = _get_engine(engine) # Process file path(s) storage_options = storage_options or {} fs, fs_token, paths = get_fs_token_paths( path, mode=""rb"", storage_options=storage_options ) # Let backend engine generate a list of parts # from the ORC metadata. 
The backend should also # return the schema and DataFrame-collection metadata parts, schema, meta = engine.read_metadata( fs, paths, columns, index, split_stripes, aggregate_files, ) # Construct the output collection with from_map return from_map( ORCFunctionWrapper(fs, columns, schema, engine, index), parts, meta=meta, divisions=[None] * (len(parts) + 1), label=""read-orc-"", token=tokenize(fs_token, path, columns), enforce_metadata=False, ) ","def read_orc( path, engine=""pyarrow"", columns=None, index=None, split_stripes=1, aggregate_files=None, storage_options=None, ): """"""Read dataframe from ORC file(s) Parameters ---------- path: str or list(str) Location of file(s), which can be a full URL with protocol specifier, and may include glob character if a single string. engine: 'pyarrow' or ORCEngine Backend ORC engine to use for IO. Default is ""pyarrow"". columns: None or list(str) Columns to load. If None, loads all. index: str Column name to set as index. split_stripes: int or False Maximum number of ORC stripes to include in each output-DataFrame partition. Use False to specify a 1-to-1 mapping between files and partitions. Default is 1. aggregate_files : bool, default False Whether distinct file paths may be aggregated into the same output partition. A setting of True means that any two file paths may be aggregated into the same output partition, while False means that inter-file aggregation is prohibited. storage_options: None or dict Further parameters to pass to the bytes backend. Returns ------- Dask.DataFrame (even if there is only one column) Examples -------- >>> df = dd.read_orc('https://github.com/apache/orc/raw/' ... 'master/examples/demo-11-zlib.orc') # doctest: +SKIP """""" # Get engine engine = _get_engine(engine) # Process file path(s) storage_options = storage_options or {} fs, fs_token, paths = get_fs_token_paths( path, mode=""rb"", storage_options=storage_options ) # Let backend engine generate a list of parts # from the ORC metadata. 
The backend should also # return the schema and DataFrame-collection metadata parts, schema, meta = engine.read_metadata( fs, paths, columns, index, split_stripes, aggregate_files, ) # Construct the output collection with from_map return from_map( ORCFunctionWrapper(fs, columns, schema, engine, index), parts, meta=meta, divisions=[None] * (len(parts) + 1), label=""read-orc"", token=tokenize(fs_token, path, columns), enforce_metadata=False, ) " 28434,"def build_kwargs(signature: inspect.Signature, default_kwargs, dfv: Any = DEFAULT_NONE): kws = {} for name, param in signature.parameters.items(): # For required params we need to pass something if param.default == param.empty: # Some special casing if name == 'permissions': kws[name] = ChatPermissions() elif name in ['prices', 'commands', 'errors']: kws[name] = [] elif name == 'media': media = InputMediaPhoto('media', parse_mode=dfv) if 'list' in str(param.annotation).lower(): kws[name] = [media] else: kws[name] = media elif name == 'results': itmc = InputTextMessageContent( 'text', parse_mode=dfv, disable_web_page_preview=dfv ) kws[name] = [ InlineQueryResultArticle('id', 'title', input_message_content=itmc), InlineQueryResultCachedPhoto( 'id', 'photo_file_id', parse_mode=dfv, input_message_content=itmc ), ] elif name == 'ok': kws['ok'] = False kws['error_message'] = 'error' else: kws[name] = True # pass values for params that can have defaults only if we don't want to use the # standard default elif name in default_kwargs: if dfv != DEFAULT_NONE: kws[name] = dfv # Some special casing for methods that have ""exactly one of the optionals"" type args elif name in ['location', 'contact', 'venue', 'inline_message_id']: kws[name] = True elif name == 'until_date': if dfv == 'non-None-value': # Europe/Berlin kws[name] = pytz.timezone('Europe/Berlin').localize( datetime.datetime(2000, 1, 1, 0) ) else: # UTC kws[name] = datetime.datetime(2000, 1, 1, 0) return kws ","def build_kwargs(signature: inspect.Signature, default_kwargs, dfv: Any = DEFAULT_NONE): kws = {} for name, param in signature.parameters.items(): # For required params we need to pass something if param.default is inspect.Parameter.empty: # Some special casing if name == 'permissions': kws[name] = ChatPermissions() elif name in ['prices', 'commands', 'errors']: kws[name] = [] elif name == 'media': media = InputMediaPhoto('media', parse_mode=dfv) if 'list' in str(param.annotation).lower(): kws[name] = [media] else: kws[name] = media elif name == 'results': itmc = InputTextMessageContent( 'text', parse_mode=dfv, disable_web_page_preview=dfv ) kws[name] = [ InlineQueryResultArticle('id', 'title', input_message_content=itmc), InlineQueryResultCachedPhoto( 'id', 'photo_file_id', parse_mode=dfv, input_message_content=itmc ), ] elif name == 'ok': kws['ok'] = False kws['error_message'] = 'error' else: kws[name] = True # pass values for params that can have defaults only if we don't want to use the # standard default elif name in default_kwargs: if dfv != DEFAULT_NONE: kws[name] = dfv # Some special casing for methods that have ""exactly one of the optionals"" type args elif name in ['location', 'contact', 'venue', 'inline_message_id']: kws[name] = True elif name == 'until_date': if dfv == 'non-None-value': # Europe/Berlin kws[name] = pytz.timezone('Europe/Berlin').localize( datetime.datetime(2000, 1, 1, 0) ) else: # UTC kws[name] = datetime.datetime(2000, 1, 1, 0) return kws " 17535,"def get_env_vars_to_add(tools_to_activate, system, user): env_vars_to_add = [] newpath, added_path = 
adjusted_path(tools_to_activate, system, user) # Don't bother setting the path if there are no changes. if os.environ['PATH'] != newpath: env_vars_to_add += [('PATH', newpath)] if added_path: errlog('Adding directories to PATH:') for item in added_path: errlog('PATH += ' + item) errlog('') # A core variable EMSDK points to the root of Emscripten SDK directory. env_vars_to_add += [('EMSDK', to_unix_path(emsdk_path()))] env_vars_to_add += [('EM_CONFIG', os.path.normpath(dot_emscripten_path()))] for tool in tools_to_activate: config = tool.activated_config() if 'EMSCRIPTEN_ROOT' in config: # For older emscripten versions that don't use an embedded cache by # default we need to export EM_CACHE. # # Sadly, we can't put this in the config file since those older versions # also didn't read the `CACHE` key from the config file: # # History: # - 'CACHE' config started being honored in 1.39.16 # https://github.com/emscripten-core/emscripten/pull/11091 # - Default to embedded cache started in 1.39.16 # https://github.com/emscripten-core/emscripten/pull/#11126 # # Since setting EM_CACHE in the environment effects the entire machine # we want to avoid this except when installing these older emscripten # versions that really need it. version = parse_emscripten_version(config['EMSCRIPTEN_ROOT']) if version <= [1, 39, 16]: em_cache_dir = os.path.join(config['EMSCRIPTEN_ROOT'], 'cache') env_vars_to_add += [('EM_CACHE', em_cache_dir)] envs = tool.activated_environment() for env in envs: key, value = parse_key_value(env) value = to_native_path(tool.expand_vars(value)) env_vars_to_add += [(key, value)] return env_vars_to_add ","def get_env_vars_to_add(tools_to_activate, system, user): env_vars_to_add = [] newpath, added_path = adjusted_path(tools_to_activate, system, user) # Don't bother setting the path if there are no changes. if os.environ['PATH'] != newpath: env_vars_to_add += [('PATH', newpath)] if added_path: errlog('Adding directories to PATH:') for item in added_path: errlog('PATH += ' + item) errlog('') # A core variable EMSDK points to the root of Emscripten SDK directory. env_vars_to_add += [('EMSDK', to_unix_path(emsdk_path()))] env_vars_to_add += [('EM_CONFIG', os.path.normpath(dot_emscripten_path()))] for tool in tools_to_activate: config = tool.activated_config() if 'EMSCRIPTEN_ROOT' in config: # For older emscripten versions that don't use an embedded cache by # default we need to export EM_CACHE. # # Sadly, we can't put this in the config file since those older versions # also didn't read the `CACHE` key from the config file: # # History: # - 'CACHE' config started being honored in 1.39.16 # https://github.com/emscripten-core/emscripten/pull/11091 # - Default to embedded cache started in 1.39.16 # https://github.com/emscripten-core/emscripten/pull/#11126 # # Since setting EM_CACHE in the environment effects the entire machine # we want to avoid this except when installing these older emscripten # versions that really need it. 
version = parse_emscripten_version(config['EMSCRIPTEN_ROOT']) if version < [1, 39, 16]: em_cache_dir = os.path.join(config['EMSCRIPTEN_ROOT'], 'cache') env_vars_to_add += [('EM_CACHE', em_cache_dir)] envs = tool.activated_environment() for env in envs: key, value = parse_key_value(env) value = to_native_path(tool.expand_vars(value)) env_vars_to_add += [(key, value)] return env_vars_to_add " 33883,"def is_same_ray_options(ray_options_1: Dict, ray_options_2: Dict) -> bool: resources_1 = _resources_from_ray_options(ray_options_1) resources_2 = _resources_from_ray_options(ray_options_2) if resources_1 != resources_2: return False resources = (""num_cpus"", ""num_gpus"", ""memory"", ""object_store_memory"", ""accelerator_type"", ""resources"") ray_options_1 = ray_options_1.copy() ray_options_2 = ray_options_2.copy() for res in resources: ray_options_1.pop(res, None) ray_options_2.pop(res, None) return ray_options_1 == ray_options_2 ","def resources_equal(ray_options_1: Dict, ray_options_2: Dict) -> bool: resources_1 = _resources_from_ray_options(ray_options_1) resources_2 = _resources_from_ray_options(ray_options_2) if resources_1 != resources_2: return False resources = (""num_cpus"", ""num_gpus"", ""memory"", ""object_store_memory"", ""accelerator_type"", ""resources"") ray_options_1 = ray_options_1.copy() ray_options_2 = ray_options_2.copy() for res in resources: ray_options_1.pop(res, None) ray_options_2.pop(res, None) return ray_options_1 == ray_options_2 " 5531,"def repository_url_validator(url): # Regular URLs validator = URLValidator([""http"", ""https"", ""ftp"", ""ftps"", ""ssh"", ""svn+ssh""]) # Git SCP-like URL pattern = r""(?Pgit@([\w\.@]+)(/|:))(?P[\w,\-,\_]+)/(?P[\w,\-,\_]+)(.git){0,1}((/){0,1})"" try: validator(url) except ValidationError: if not re.match(pattern, url): raise ValidationError(""Invalid URL"") ","def repository_url_validator(url): # Regular URLs validator = URLValidator([""http"", ""https"", ""ftp"", ""ftps"", ""ssh"", ""svn+ssh""]) # Git SCP-like URL pattern = r""git@[\w\.@]+[/:][\w-]+/[\w-]+(.git)?/?"" try: validator(url) except ValidationError: if not re.match(pattern, url): raise ValidationError(""Invalid URL"") " 11693,"def create_markdown(context: GraphQLContext) -> Markdown: return create_markdown_hook.call_action( create_markdown_action, context, BlockParser(), InlineParser(AstRenderer()), PLUGINS.values(), ) ","def create_markdown(context: GraphQLContext) -> Markdown: return create_markdown_hook.call_action( create_markdown_action, context, BlockParser(), InlineParser(AstRenderer()), default_plugins, ) " 42348,"def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get a role argument specification. .. note:: Version added: 2.2 :param str role: Simple role name, or fully qualified collection role name, to query. :param str collection: If specified, will be combined with the role name to form a fully qualified collection role name. If this is supplied, the ``role`` param should not be fully qualified. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. 
Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. 
Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_argspec_command(role, collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error ","def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get a role argument specification. .. note:: Version added: 2.2 :param str role: Simple role name, or fully qualified collection role name, to query. :param str collection: If specified, will be combined with the role name to form a fully qualified collection role name. If this is supplied, the ``role`` param should not be fully qualified. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. 
(default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (for example, started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_argspec_command(role, collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error " 41224,"def state_vector_to_probabilities(state_vector: 'cirq.STATE_VECTOR_LIKE') -> np.ndarray: normalized_vector = format(state_vector) return np.abs(normalized_vector) ** 2 ","def state_vector_to_probabilities(state_vector: 'cirq.STATE_VECTOR_LIKE') -> np.ndarray: valid_state_vector = to_valid_state_vector(state_vector) return np.abs(normalized_vector) ** 2 " 30465,"def mirror_investigation(): """""" Update the integration context with a new or existing mirror. 
"""""" mirror_type = demisto.args().get('type', 'all') investigation = demisto.investigation() if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE: return_error('Can not perform this action in playground.') investigation_id = investigation.get('id') integration_context = demisto.getIntegrationContext() if 'mirrors' not in integration_context: mirrors: list = [] else: mirrors = json.loads(integration_context['mirrors']) mirror_filter = list(filter(lambda m: m['investigation_id'] == investigation_id, mirrors)) if mirror_filter: # Delete existing mirror mirrors.pop(mirrors.index(mirror_filter[0])) mirror = { 'investigation_id': investigation.get('id'), 'mirror_type': mirror_type, 'mirrored': False } mirrors.append(mirror) demisto.setIntegrationContext({'mirrors': json.dumps(mirrors)}) demisto.results('Investigation mirrored successfully.') ","def mirror_investigation(): """""" Update the integration context with a new or existing mirror. """""" mirror_type = demisto.args().get('type', 'all') investigation = demisto.investigation() if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE: return_error('Can not perform this action in the playground.') investigation_id = investigation.get('id') integration_context = demisto.getIntegrationContext() if 'mirrors' not in integration_context: mirrors: list = [] else: mirrors = json.loads(integration_context['mirrors']) mirror_filter = list(filter(lambda m: m['investigation_id'] == investigation_id, mirrors)) if mirror_filter: # Delete existing mirror mirrors.pop(mirrors.index(mirror_filter[0])) mirror = { 'investigation_id': investigation.get('id'), 'mirror_type': mirror_type, 'mirrored': False } mirrors.append(mirror) demisto.setIntegrationContext({'mirrors': json.dumps(mirrors)}) demisto.results('Investigation mirrored successfully.') " 24907,"def run_pyreverse(argv: Optional[Sequence[str]] = None): """"""Run pyreverse Arguments can be a list of strings normally supplied as arguments on the command line """""" from pylint.pyreverse.main import Run as PyreverseRun PyreverseRun(argv or sys.argv[1:]) ","def run_pyreverse(argv: Optional[Sequence[str]] = None): """"""Run pyreverse argv can be a list of strings normally supplied as arguments on the command line """""" from pylint.pyreverse.main import Run as PyreverseRun PyreverseRun(argv or sys.argv[1:]) " 756,"def axial_kurtosis(dki_params, min_kurtosis=-3./7, max_kurtosis=10, analytical=True): r"""""" Computes axial Kurtosis (AK) from the kurtosis tensor [1]_, [2]_. Parameters ---------- dki_params : ndarray (x, y, z, 27) or (n, 27) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor min_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are smaller than `min_kurtosis` are replaced with `min_kurtosis`. Default = -3./7 (theoretical kurtosis limit for regions that consist of water confined to spherical pores [3]_) max_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are larger than `max_kurtosis` are replaced with `max_kurtosis`. 
Default = 10 analytical : bool (optional) If True, AK is calculated from rotated diffusion kurtosis tensor, otherwise it will be computed from the apparent diffusion kurtosis values along the principal axis of the diffusion tensor (see notes). Default is set to True. Returns ------- ak : array Calculated AK. Notes ----- AK is defined as the directional kurtosis parallel to the fiber's main direction e1 [1]_, [2]_. You can compute AK using to approaches: 1) AK is calculated from rotated diffusion kurtosis tensor [2]_, i.e.: .. math:: AK = \hat{W}_{1111} \frac{(\lambda_{1}+\lambda_{2}+\lambda_{3})^2}{(9 \lambda_{1}^2)} 2) AK can be sampled from the principal axis of the diffusion tensor: .. math:: AK = K(\mathbf{\mathbf{e}_1) Although both approaches leads to an exactly calculation of AK, the first approach will be refered to as the analytical method whilte the second approach will be refered to as the numerical method based on their analogy to the estimation strategies for MK and RK. References ---------- .. [1] Jensen, J.H., Helpern, J.A., 2010. MRI quantification of non-Gaussian water diffusion by kurtosis analysis. NMR in Biomedicine 23(7): 698-710 .. [2] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011. Estimation of tensors and tensor-derived measures in diffusional kurtosis imaging. Magn Reson Med. 65(3), 823-836 .. [3] Barmpoutis, A., & Zhuo, J., 2011. Diffusion kurtosis imaging: Robust estimation from DW-MRI using homogeneous polynomials. Proceedings of the 8th {IEEE} International Symposium on Biomedical Imaging: From Nano to Macro, ISBI 2011, 262-265. doi: 10.1109/ISBI.2011.5872402 """""" # Flat parameters outshape = dki_params.shape[:-1] dki_params = dki_params.reshape((-1, dki_params.shape[-1])) # Split data evals, evecs, kt = split_dki_param(dki_params) # Initialize AK AK = np.zeros(kt.shape[:-1]) # select relevant voxels to process rel_i = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2]) kt = kt[rel_i] evecs = evecs[rel_i] evals = evals[rel_i] AKi = AK[rel_i] # Compute mean diffusivity md = mean_diffusivity(evals) if analytical: # Rotate the kurtosis tensor from the standard Cartesian coordinate # system to another coordinate system in which the 3 orthonormal # eigenvectors of DT are the base coordinate Wxxxx = Wrotate_element(kt, 0, 0, 0, 0, evecs) AKi = Wxxxx * (md ** 2) / (evals[..., 0] ** 2) else: # Compute apparent directional kurtosis along evecs[0] dt = lower_triangular(vec_val_vect(evecs, evals)) for vox in range(len(kt)): AKi[vox] = directional_kurtosis(dt[vox], md[vox], kt[vox], np.array([evecs[vox, :, 0]])) # reshape data according to input data AK[rel_i] = AKi if min_kurtosis is not None: AK = AK.clip(min=min_kurtosis) if max_kurtosis is not None: AK = AK.clip(max=max_kurtosis) return AK.reshape(outshape) ","def axial_kurtosis(dki_params, min_kurtosis=-3./7, max_kurtosis=10, analytical=True): r"""""" Computes axial Kurtosis (AK) from the kurtosis tensor [1]_, [2]_. Parameters ---------- dki_params : ndarray (x, y, z, 27) or (n, 27) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor min_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are smaller than `min_kurtosis` are replaced with `min_kurtosis`. 
Default = -3./7 (theoretical kurtosis limit for regions that consist of water confined to spherical pores [3]_) max_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are larger than `max_kurtosis` are replaced with `max_kurtosis`. Default = 10 analytical : bool (optional) If True, AK is calculated from rotated diffusion kurtosis tensor, otherwise it will be computed from the apparent diffusion kurtosis values along the principal axis of the diffusion tensor (see notes). Default is set to True. Returns ------- ak : array Calculated AK. Notes ----- AK is defined as the directional kurtosis parallel to the fiber's main direction e1 [1]_, [2]_. You can compute AK using to approaches: 1) AK is calculated from rotated diffusion kurtosis tensor [2]_, i.e.: .. math:: AK = \hat{W}_{1111} \frac{(\lambda_{1}+\lambda_{2}+\lambda_{3})^2}{(9 \lambda_{1}^2)} 2) AK can be sampled from the principal axis of the diffusion tensor: .. math:: AK = K(\mathbf{\mathbf{e}_1) Although both approaches leads to an exactly calculation of AK, the first approach will be referred to as the analytical method while the second approach will be refered to as the numerical method based on their analogy to the estimation strategies for MK and RK. References ---------- .. [1] Jensen, J.H., Helpern, J.A., 2010. MRI quantification of non-Gaussian water diffusion by kurtosis analysis. NMR in Biomedicine 23(7): 698-710 .. [2] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011. Estimation of tensors and tensor-derived measures in diffusional kurtosis imaging. Magn Reson Med. 65(3), 823-836 .. [3] Barmpoutis, A., & Zhuo, J., 2011. Diffusion kurtosis imaging: Robust estimation from DW-MRI using homogeneous polynomials. Proceedings of the 8th {IEEE} International Symposium on Biomedical Imaging: From Nano to Macro, ISBI 2011, 262-265. doi: 10.1109/ISBI.2011.5872402 """""" # Flat parameters outshape = dki_params.shape[:-1] dki_params = dki_params.reshape((-1, dki_params.shape[-1])) # Split data evals, evecs, kt = split_dki_param(dki_params) # Initialize AK AK = np.zeros(kt.shape[:-1]) # select relevant voxels to process rel_i = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2]) kt = kt[rel_i] evecs = evecs[rel_i] evals = evals[rel_i] AKi = AK[rel_i] # Compute mean diffusivity md = mean_diffusivity(evals) if analytical: # Rotate the kurtosis tensor from the standard Cartesian coordinate # system to another coordinate system in which the 3 orthonormal # eigenvectors of DT are the base coordinate Wxxxx = Wrotate_element(kt, 0, 0, 0, 0, evecs) AKi = Wxxxx * (md ** 2) / (evals[..., 0] ** 2) else: # Compute apparent directional kurtosis along evecs[0] dt = lower_triangular(vec_val_vect(evecs, evals)) for vox in range(len(kt)): AKi[vox] = directional_kurtosis(dt[vox], md[vox], kt[vox], np.array([evecs[vox, :, 0]])) # reshape data according to input data AK[rel_i] = AKi if min_kurtosis is not None: AK = AK.clip(min=min_kurtosis) if max_kurtosis is not None: AK = AK.clip(max=max_kurtosis) return AK.reshape(outshape) " 951,"def _linear_system(eqs, v): """"""return, without simplification, the list of rows containing coefficients of the objects in v for each equation in eqs, and a list of expressions representing the ""constant"" terms of each equation. The constant terms might have a dependency on the values of v if the equations are not full expanded or if a factor in a term of an equation contains but does not equal an element of v. 
Raises ====== NonlinearError A term in an expression two or more factors from v Examples ======== >>> from sympy.abc import x, y, z >>> from sympy.solvers.solveset import _linear_system, linsolve >>> from sympy import Matrix >>> _linear_system([x + y - 3, x - y - 1], [x, y]) ([[1, 1], [1, -1]], [-3, -1]) If the return results are going to be used to solve for v, the constants will have to be negated. >>> A, b = Matrix(_[0]), Matrix([-i for i in _[1]]) >>> linsolve(A.hstack(A, b)) {(2, 1)} The implicit nature of the coefficients is seen in the following where `x**y` depends on `x` but, not being `x`, is assumed to be independent: >>> _linear_system([z*x + 2*x + y, x**y], [x, y]) ([[z + 2, 1], [0, 0]], [0, x**y]) But an error will be raised if two of the specified values appear as literal factors in a single term: >>> _linear_system([x*y - 3], [x, y]) Traceback (most recent call last): ... NonlinearError: nonlinear term: x*y The error is not raised if a factor contains a value of v in a non-literal way, however, so use this function with caution: >>> _linear_system([x*(y + 1) + 3], [x, y]) ([y + 1, 0]], [3]) """""" rows = [] cons = [] x = list(v) for e in eqs: c, t = e.as_coeff_add(*x) c = [c] r = [[] for _ in range(len(x))] rows.append(r) for ti in t: ci, f = ti.as_coeff_mul() hit = [0]*len(f) for i, fi in enumerate(f): if fi in x: hit[i] = 1 if not any(hit): c.append(ti) else: if sum(hit) != 1: raise NonlinearError('nonlinear term: %s' % ti) f = list(f) loc = hit.index(1) fi = f.pop(loc) if ci is not S.One: f.insert(0, ci) r[x.index(fi)].append(Mul._from_args(f)) cons.append(Add(*c)) return [[Add(*i) for i in r] for r in rows], cons ","def _linear_system(eqs, v): """"""return, without simplification, the list of rows containing coefficients of the objects in v for each equation in eqs, and a list of expressions representing the ""constant"" terms of each equation. The constant terms might have a dependency on the values of v if the equations are not full expanded or if a factor in a term of an equation contains but does not equal an element of v. Raises ====== NonlinearError A term in an expression two or more factors from v Examples ======== >>> from sympy.abc import x, y, z >>> from sympy.solvers.solveset import _linear_system, linsolve >>> from sympy import Matrix >>> _linear_system([x + y - 3, x - y - 1], [x, y]) ([[1, 1], [1, -1]], [-3, -1]) If the return results are going to be used to solve for v, the constants will have to be negated. >>> A, b = Matrix(_[0]), Matrix([-i for i in _[1]]) >>> linsolve(A.hstack(A, b)) {(2, 1)} The implicit nature of the coefficients is seen in the following where `x**y` depends on `x` but, not being `x`, is assumed to be independent: >>> _linear_system([z*x + 2*x + y, x**y], [x, y]) ([[z + 2, 1], [0, 0]], [0, x**y]) But an error will be raised if two of the specified values appear as literal factors in a single term: >>> _linear_system([x*y - 3], [x, y]) Traceback (most recent call last): ... 
NonlinearError: nonlinear term: x*y The error is not raised if a factor contains a value of v in a non-literal way, however, so use this function with caution: >>> _linear_system([x*(y + 1) + 3], [x, y]) ([[y + 1, 0]], [3]) """""" rows = [] cons = [] x = list(v) for e in eqs: c, t = e.as_coeff_add(*x) c = [c] r = [[] for _ in range(len(x))] rows.append(r) for ti in t: ci, f = ti.as_coeff_mul() hit = [0]*len(f) for i, fi in enumerate(f): if fi in x: hit[i] = 1 if not any(hit): c.append(ti) else: if sum(hit) != 1: raise NonlinearError('nonlinear term: %s' % ti) f = list(f) loc = hit.index(1) fi = f.pop(loc) if ci is not S.One: f.insert(0, ci) r[x.index(fi)].append(Mul._from_args(f)) cons.append(Add(*c)) return [[Add(*i) for i in r] for r in rows], cons " 30772,"def generate_policy_data_body(args): severity = args.get('severity') action = args.get('action') followed_action = args.get('followed-action') body = {} if args.get('enabled'): body['enabled'] = True if args['enabled'] == 'True' else False if args.get('one-alert-per-session'): body['oneAlertPerSession'] = True if args['one-alert-per-session'] == 'True' else False if args.get('display-response-page'): body['displayResponsePage'] = True if args['display-response-page'] == 'True' else False if severity: body['severity'] = severity if action: body['action'] = action if followed_action: body['followedAction'] = followed_action return body ","def generate_policy_data_body(args): severity = args.get('severity') action = args.get('action') followed_action = args.get('followed-action') body = {} if args.get('enabled'): body['enabled'] = args['enabled'] == 'True' if args.get('one-alert-per-session'): body['oneAlertPerSession'] = True if args['one-alert-per-session'] == 'True' else False if args.get('display-response-page'): body['displayResponsePage'] = True if args['display-response-page'] == 'True' else False if severity: body['severity'] = severity if action: body['action'] = action if followed_action: body['followedAction'] = followed_action return body " 49874,"def dc_ohmic_losses(ohms, current): """""" Returns ohmic losses in in units of power from the equivalent resistance of of the wires and the operating current. Parameters ---------- ohms: numeric, float current: numeric, float or array-like Returns ---------- numeric Single or array-like value of the losses in units of power References ---------- -- [1] PVsyst 7 Help. ""Array ohmic wiring loss"". https://www.pvsyst.com/help/ohmic_loss.htm """""" return ohms * current * current ","def dc_ohmic_losses(ohms, current): """""" Returns ohmic losses in units of power from the equivalent resistance of the wires and the operating current. Parameters ---------- ohms: numeric, float current: numeric, float or array-like Returns ---------- numeric Single or array-like value of the losses in units of power References ---------- -- [1] PVsyst 7 Help. ""Array ohmic wiring loss"". https://www.pvsyst.com/help/ohmic_loss.htm """""" return ohms * current * current " 7456,"def adapted_rand_error(image_true=None, image_test=None, *, table=None, ignore_labels=(0,), alpha=0.5): r""""""Compute Adapted Rand error as defined by the SNEMI3D contest. [1]_ Parameters ---------- image_true : ndarray of int Ground-truth label image, same shape as im_test. image_test : ndarray of int Test image. table : scipy.sparse array in crs format, optional A contingency table built with skimage.evaluate.contingency_table. If None, it will be computed on the fly. ignore_labels : sequence of int, optional Labels to ignore. 
Any part of the true image labeled with any of these values will not be counted in the score. alpha : float, optional A float value bounded [0,1] controlling the relative weight given to precision and recall in the adapted Rand error calculation. Default is to weight precision and recall equally. When alpha = 0, adapted Rand error = recall. When alpha = 1, adapted Rand error = precision. Returns ------- are : float The adapted Rand error; equal to :math:`1 - \frac{\sum_{ij} p_{ij}^{2}}{\alpha \sum_{k} s_{k}^{2} + (1-\alpha)\sum_{k} t_{k}^{2}}`, where :math:`p_{ij}` is the probability that a pixel has the same label in the test image *and* in the true image, :math:`t_{k}` is the probability that a pixel has label :math:`k` in the true image, and :math:`s_{k}` is the probability that a pixel has label :math:`k` in the test image. prec : float The adapted Rand precision: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the test image. rec : float The adapted Rand recall: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the true image. Notes ----- Pixels with label 0 in the true segmentation are ignored in the score. References ---------- .. [1] Arganda-Carreras I, Turaga SC, Berger DR, et al. (2015) Crowdsourcing the creation of image segmentation algorithms for connectomics. Front. Neuroanat. 9:142. :DOI:`10.3389/fnana.2015.00142` """""" if image_test is not None and image_true is not None: check_shape_equality(image_true, image_test) if table is None: p_ij = contingency_table(image_true, image_test, ignore_labels=ignore_labels, normalize=False) else: p_ij = table if alpha < 0.0 or alpha > 1.0: raise ValueError('alpha must be between 0 and 1') # Sum of the joint distribution squared sum_p_ij2 = p_ij.data @ p_ij.data - p_ij.sum() a_i = p_ij.sum(axis=1).A.ravel() b_i = p_ij.sum(axis=0).A.ravel() # Sum of squares of the test segment sizes (this is 2x the number of pairs # of pixels with the same label in im_test) sum_a2 = a_i @ a_i - a_i.sum() # Same for im_true sum_b2 = b_i @ b_i - b_i.sum() precision = sum_p_ij2 / sum_a2 recall = sum_p_ij2 / sum_b2 fscore = sum_p_ij2 / (alpha * sum_a2 + (1 - alpha) * sum_b2) are = 1. - fscore return are, precision, recall ","def adapted_rand_error(image_true=None, image_test=None, *, table=None, ignore_labels=(0,), alpha=0.5): r""""""Compute Adapted Rand error as defined by the SNEMI3D contest. [1]_ Parameters ---------- image_true : ndarray of int Ground-truth label image, same shape as im_test. image_test : ndarray of int Test image. table : scipy.sparse array in crs format, optional A contingency table built with skimage.evaluate.contingency_table. If None, it will be computed on the fly. ignore_labels : sequence of int, optional Labels to ignore. Any part of the true image labeled with any of these values will not be counted in the score. alpha : float, optional Relative weight given to precision and recall in the adapted Rand error calculation. Default is to weight precision and recall equally. When alpha = 0, adapted Rand error = recall. When alpha = 1, adapted Rand error = precision. 
Returns ------- are : float The adapted Rand error; equal to :math:`1 - \frac{\sum_{ij} p_{ij}^{2}}{\alpha \sum_{k} s_{k}^{2} + (1-\alpha)\sum_{k} t_{k}^{2}}`, where :math:`p_{ij}` is the probability that a pixel has the same label in the test image *and* in the true image, :math:`t_{k}` is the probability that a pixel has label :math:`k` in the true image, and :math:`s_{k}` is the probability that a pixel has label :math:`k` in the test image. prec : float The adapted Rand precision: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the test image. rec : float The adapted Rand recall: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the true image. Notes ----- Pixels with label 0 in the true segmentation are ignored in the score. References ---------- .. [1] Arganda-Carreras I, Turaga SC, Berger DR, et al. (2015) Crowdsourcing the creation of image segmentation algorithms for connectomics. Front. Neuroanat. 9:142. :DOI:`10.3389/fnana.2015.00142` """""" if image_test is not None and image_true is not None: check_shape_equality(image_true, image_test) if table is None: p_ij = contingency_table(image_true, image_test, ignore_labels=ignore_labels, normalize=False) else: p_ij = table if alpha < 0.0 or alpha > 1.0: raise ValueError('alpha must be between 0 and 1') # Sum of the joint distribution squared sum_p_ij2 = p_ij.data @ p_ij.data - p_ij.sum() a_i = p_ij.sum(axis=1).A.ravel() b_i = p_ij.sum(axis=0).A.ravel() # Sum of squares of the test segment sizes (this is 2x the number of pairs # of pixels with the same label in im_test) sum_a2 = a_i @ a_i - a_i.sum() # Same for im_true sum_b2 = b_i @ b_i - b_i.sum() precision = sum_p_ij2 / sum_a2 recall = sum_p_ij2 / sum_b2 fscore = sum_p_ij2 / (alpha * sum_a2 + (1 - alpha) * sum_b2) are = 1. 
- fscore return are, precision, recall " 32022,"def get_search_results(client: Client, parser: Callable) -> Union[list, str]: """""" Searches google and returns a result using the parser"""""" results = [] query = client.build_query() all_pages_found = False pages = calculate_pages(client.max_results) current_page = 0 start = 0 while not all_pages_found and current_page < pages: results_in_page = calculate_page_size(current_page, client.max_results) page = client.search(query, start, results_in_page) items = page.get('items') if not items: break for item in items: results.extend(parser(client, item)) # prepare next run current_page += 1 start += results_in_page total = int(demisto.get(page, 'searchInformation.totalResults', demisto.get(page, 'queries.request.totalResults', 0))) all_pages_found = total < start if not results: return ""No results found"" return results ","def get_search_results(client: Client, parser: Callable) -> Union[list, str]: """""" Searches google and returns a result using the parser"""""" results = [] query = client.build_query() all_pages_found = False pages = calculate_pages(client.max_results) current_page = 0 start = 0 while not all_pages_found and current_page < pages: results_in_page = calculate_page_size(current_page, client.max_results) page = client.search(query, start, results_in_page) items = page.get('items') if not items: break for item in items: results.extend(parser(client, item)) # prepare next run current_page += 1 start += results_in_page total = int(demisto.get(page, 'searchInformation.totalResults', demisto.get(page, 'queries.request.totalResults', 0))) all_pages_found = total == start if not results: return ""No results found"" return results " 14456,"def get_playbooks_and_roles(options=None): """"""Find roles and playbooks."""""" if options is None: options = {} # git is preferred as it also considers .gitignore files = OrderedDict.fromkeys(sorted(subprocess.check_output( [""git"", ""ls-files"", ""*.yaml"", ""*.yml""], universal_newlines=True).split())) playbooks = [] role_dirs = [] role_internals = { 'defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', } # detect role in repository root: if 'tasks/main.yml' in files or 'tasks/main.yml' in files: role_dirs.append('.') for p in map(Path, files): if any(str(p).startswith(file_path) for file_path in options.exclude_paths): continue elif (next((i for i in p.parts if i.endswith('playbooks')), None) or 'playbook' in p.parts[-1]): playbooks.append(normpath(p)) continue # ignore if any folder ends with _vars if next((i for i in p.parts if i.endswith('_vars')), None): continue elif 'roles' in p.parts or '.' in role_dirs: if 'tasks' in p.parts and p.parts[-1] in ['main.yaml', 'main.yml']: role_dirs.append(str(p.parents[1])) elif role_internals.intersection(p.parts): continue elif 'tests' in p.parts: playbooks.append(normpath(p)) if 'molecule' in p.parts: if p.parts[-1] != 'molecule.yml': playbooks.append(normpath(p)) continue # hidden files are clearly not playbooks, likely config files. 
if p.parts[-1].startswith('.'): continue if is_playbook(p): playbooks.append(normpath(p)) continue if options.verbosity: print('Unknown file type: %s' % normpath(p)) if options.verbosity: print('Found roles: ' + ' '.join(role_dirs)) print('Found playbooks: ' + ' '.join(playbooks)) return role_dirs + playbooks ","def get_playbooks_and_roles(options=None): """"""Find roles and playbooks."""""" if options is None: options = {} # git is preferred as it also considers .gitignore files = OrderedDict.fromkeys(sorted(subprocess.check_output( [""git"", ""ls-files"", ""*.yaml"", ""*.yml""], universal_newlines=True).split())) playbooks = [] role_dirs = [] role_internals = { 'defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', } # detect role in repository root: if 'tasks/main.yml' in files: role_dirs.append('.') for p in map(Path, files): if any(str(p).startswith(file_path) for file_path in options.exclude_paths): continue elif (next((i for i in p.parts if i.endswith('playbooks')), None) or 'playbook' in p.parts[-1]): playbooks.append(normpath(p)) continue # ignore if any folder ends with _vars if next((i for i in p.parts if i.endswith('_vars')), None): continue elif 'roles' in p.parts or '.' in role_dirs: if 'tasks' in p.parts and p.parts[-1] in ['main.yaml', 'main.yml']: role_dirs.append(str(p.parents[1])) elif role_internals.intersection(p.parts): continue elif 'tests' in p.parts: playbooks.append(normpath(p)) if 'molecule' in p.parts: if p.parts[-1] != 'molecule.yml': playbooks.append(normpath(p)) continue # hidden files are clearly not playbooks, likely config files. if p.parts[-1].startswith('.'): continue if is_playbook(p): playbooks.append(normpath(p)) continue if options.verbosity: print('Unknown file type: %s' % normpath(p)) if options.verbosity: print('Found roles: ' + ' '.join(role_dirs)) print('Found playbooks: ' + ' '.join(playbooks)) return role_dirs + playbooks " 44106,"def tape_to_graph(tape: QuantumTape) -> MultiDiGraph: """""" Converts a quantum tape to a directed multigraph. .. note:: This operation is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() ` transform for more details. Args: tape (QuantumTape): tape to be converted into a directed multigraph Returns: nx.MultiDiGraph: a directed multigraph that captures the circuit structure of the input tape **Example** Consider the following tape: .. code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.RY(0.9, wires=0) qml.CNOT(wires=[0, 1]) qml.expval(qml.PauliZ(1)) Its corresponding circuit graph can be found using >>> qml.transforms.qcut.tape_to_graph(tape) """""" graph = MultiDiGraph() wire_latest_node = {w: None for w in tape.wires} for order, op in enumerate(tape.operations): _add_operator_node(graph, op, order, wire_latest_node) order += 1 # pylint: disable=undefined-loop-variable for m in tape.measurements: obs = getattr(m, ""obs"", None) if obs is not None and isinstance(obs, Tensor): for o in obs.obs: m_ = MeasurementProcess(m.return_type, obs=o) _add_operator_node(graph, m_, order, wire_latest_node) elif m.return_type.name == ""Sample"": for w in m.wires.tolist(): s_ = qml.sample(qml.Projector([1], wires=w)) _add_operator_node(graph, s_, order, wire_latest_node) else: _add_operator_node(graph, m, order, wire_latest_node) order += 1 return graph ","def tape_to_graph(tape: QuantumTape) -> MultiDiGraph: """""" Converts a quantum tape to a directed multigraph. .. 
note:: This operation is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() ` transform for more details. Args: tape (QuantumTape): tape to be converted into a directed multigraph Returns: nx.MultiDiGraph: a directed multigraph that captures the circuit structure of the input tape **Example** Consider the following tape: .. code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.RY(0.9, wires=0) qml.CNOT(wires=[0, 1]) qml.expval(qml.PauliZ(1)) Its corresponding circuit graph can be found using >>> qml.transforms.qcut.tape_to_graph(tape) """""" graph = MultiDiGraph() wire_latest_node = {w: None for w in tape.wires} for order, op in enumerate(tape.operations): _add_operator_node(graph, op, order, wire_latest_node) order += 1 # pylint: disable=undefined-loop-variable for m in tape.measurements: obs = getattr(m, ""obs"", None) if obs is not None and isinstance(obs, Tensor): for o in obs.obs: m_ = MeasurementProcess(m.return_type, obs=o) _add_operator_node(graph, m_, order, wire_latest_node) elif m.return_type.name == ""Sample"": for w in m.wires: s_ = qml.sample(qml.Projector([1], wires=w)) _add_operator_node(graph, s_, order, wire_latest_node) else: _add_operator_node(graph, m, order, wire_latest_node) order += 1 return graph " 31147,"def main(): SESSION.proxies = handle_proxy() client = SixgillEnrichClient( demisto.params()[""client_id""], demisto.params()[""client_secret""], CHANNEL_CODE, demisto, SESSION, VERIFY ) command = demisto.command() demisto.info(f""Command being called is {command}"") commands: Dict[str, Callable] = { ""test-module"": test_module_command, } try: if demisto.command() == ""ip"": return_results(ip_reputation_command(client, demisto.args())) elif demisto.command() == ""domain"": return_results(domain_reputation_command(client, demisto.args())) elif demisto.command() == ""url"": return_results(url_reputation_command(client, demisto.args())) elif demisto.command() == ""file"": return_results(file_reputation_command(client, demisto.args())) elif demisto.command() == ""actor"": return_results(actor_reputation_command(client, demisto.args())) elif demisto.command() == ""post_id"": return_results(postid_reputation_command(client, demisto.args())) else: readable_output, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Error failed to execute {demisto.command()}, error: [{e}]"") ","def main(): SESSION.proxies = handle_proxy() client = SixgillEnrichClient( demisto.params()[""client_id""], demisto.params()[""client_secret""], CHANNEL_CODE, demisto, SESSION, VERIFY ) command = demisto.command() demisto.info(f""Command being called is {command}"") commands: Dict[str, Callable] = { ""test-module"": test_module_command, } try: if demisto.command() == ""ip"": return_results(ip_reputation_command(client, demisto.args())) elif command == ""domain"": return_results(domain_reputation_command(client, demisto.args())) elif demisto.command() == ""url"": return_results(url_reputation_command(client, demisto.args())) elif demisto.command() == ""file"": return_results(file_reputation_command(client, demisto.args())) elif demisto.command() == ""actor"": return_results(actor_reputation_command(client, demisto.args())) elif demisto.command() == ""post_id"": return_results(postid_reputation_command(client, demisto.args())) else: readable_output, outputs, raw_response = 
commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Error failed to execute {demisto.command()}, error: [{e}]"") " 38344,"def add_xray_emissivity_field( ds, e_min, e_max, redshift=0.0, metallicity=(""gas"", ""metallicity""), table_type=""cloudy"", data_dir=None, cosmology=None, dist=None, ftype=""gas"", ): r""""""Create X-ray emissivity fields for a given energy range. Parameters ---------- e_min : float The minimum energy in keV for the energy band. e_min : float The maximum energy in keV for the energy band. redshift : float, optional The cosmological redshift of the source of the field. Default: 0.0. metallicity : str or tuple of str or float, optional Either the name of a metallicity field or a single floating-point number specifying a spatially constant metallicity. Must be in solar units. If set to None, no metals will be assumed. Default: (""gas"", ""metallicity"") table_type : string, optional The type of emissivity table to be used when creating the fields. Options are ""cloudy"" or ""apec"". Default: ""cloudy"" data_dir : string, optional The location to look for the data table in. If not supplied, the file will be looked for in the location of the YT_DEST environment variable or in the current working directory. cosmology : :class:`~yt.utilities.cosmology.Cosmology`, optional If set and redshift > 0.0, this cosmology will be used when computing the cosmological dependence of the emission fields. If not set, yt's default LCDM cosmology will be used. dist : (value, unit) tuple or :class:`~yt.units.yt_array.YTQuantity`, optional The distance to the source, used for making intensity fields. You should only use this if your source is nearby (not cosmological). Default: None ftype : string, optional The field type to use when creating the fields, default ""gas"" This will create at least three fields: ""xray_emissivity_{e_min}_{e_max}_keV"" (erg s^-1 cm^-3) ""xray_luminosity_{e_min}_{e_max}_keV"" (erg s^-1) ""xray_photon_emissivity_{e_min}_{e_max}_keV"" (photons s^-1 cm^-3) and if a redshift or distance is specified it will create two others: ""xray_intensity_{e_min}_{e_max}_keV"" (erg s^-1 cm^-3 arcsec^-2) ""xray_photon_intensity_{e_min}_{e_max}_keV"" (photons s^-1 cm^-3 arcsec^-2) These latter two are really only useful when making projections. Examples -------- >>> import yt >>> ds = yt.load(""sloshing_nomag2_hdf5_plt_cnt_0100"") >>> yt.add_xray_emissivity_field(ds, 0.5, 2) >>> p = yt.ProjectionPlot(ds, 'x', (""gas"",""xray_emissivity_0.5_2_keV""), ... table_type='apec') >>> p.save() """""" if not isinstance(metallicity, float) and metallicity is not None: try: metallicity = ds._get_field_info(*metallicity) except YTFieldNotFound as e: raise RuntimeError( ""Your dataset does not have a {} field! 
"".format(metallicity) + ""Perhaps you should specify a constant metallicity instead?"" ) from e if table_type == ""cloudy"": # Cloudy wants to scale by nH**2 other_n = ""H_nuclei_density"" else: # APEC wants to scale by nH*ne other_n = ""El_number_density"" def _norm_field(field, data): return data[ftype, ""H_nuclei_density""] * data[ftype, other_n] ds.add_field( (ftype, ""norm_field""), _norm_field, units=""cm**-6"", sampling_type=""local"" ) my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir, redshift=redshift) em_0 = my_si.get_interpolator(""primordial"", e_min, e_max) emp_0 = my_si.get_interpolator(""primordial"", e_min, e_max, energy=False) if metallicity is not None: em_Z = my_si.get_interpolator(""metals"", e_min, e_max) emp_Z = my_si.get_interpolator(""metals"", e_min, e_max, energy=False) def _emissivity_field(field, data): with np.errstate(all=""ignore""): dd = { ""log_nH"": np.log10(data[ftype, ""H_nuclei_density""]), ""log_T"": np.log10(data[ftype, ""temperature""]), } my_emissivity = np.power(10, em_0(dd)) if metallicity is not None: if isinstance(metallicity, DerivedField): my_Z = data[metallicity.name].to(""Zsun"") else: my_Z = metallicity my_emissivity += my_Z * np.power(10, em_Z(dd)) my_emissivity[np.isnan(my_emissivity)] = 0 return data[ftype, ""norm_field""] * YTArray(my_emissivity, ""erg*cm**3/s"") emiss_name = (ftype, ""xray_emissivity_%s_%s_keV"" % (e_min, e_max)) ds.add_field( emiss_name, function=_emissivity_field, display_name=r""\epsilon_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""erg/cm**3/s"", ) def _luminosity_field(field, data): return data[emiss_name] * data[ftype, ""mass""] / data[ftype, ""density""] lum_name = (ftype, ""xray_luminosity_%s_%s_keV"" % (e_min, e_max)) ds.add_field( lum_name, function=_luminosity_field, display_name=r""\rm{L}_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""erg/s"", ) def _photon_emissivity_field(field, data): dd = { ""log_nH"": np.log10(data[ftype, ""H_nuclei_density""]), ""log_T"": np.log10(data[ftype, ""temperature""]), } my_emissivity = np.power(10, emp_0(dd)) if metallicity is not None: if isinstance(metallicity, DerivedField): my_Z = data[metallicity.name].to(""Zsun"") else: my_Z = metallicity my_emissivity += my_Z * np.power(10, emp_Z(dd)) return data[ftype, ""norm_field""] * YTArray(my_emissivity, ""photons*cm**3/s"") phot_name = (ftype, ""xray_photon_emissivity_%s_%s_keV"" % (e_min, e_max)) ds.add_field( phot_name, function=_photon_emissivity_field, display_name=r""\epsilon_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""photons/cm**3/s"", ) fields = [emiss_name, lum_name, phot_name] if redshift > 0.0 or dist is not None: if dist is None: if cosmology is None: if hasattr(ds, ""cosmology""): cosmology = ds.cosmology else: cosmology = Cosmology() D_L = cosmology.luminosity_distance(0.0, redshift) angular_scale = 1.0 / cosmology.angular_scale(0.0, redshift) dist_fac = ds.quan( 1.0 / (4.0 * np.pi * D_L * D_L * angular_scale * angular_scale).v, ""rad**-2"", ) else: redshift = 0.0 # Only for local sources! 
try: # normal behaviour, if dist is a YTQuantity dist = ds.quan(dist.value, dist.units) except AttributeError as e: try: dist = ds.quan(*dist) except (RuntimeError, TypeError): raise TypeError( ""dist should be a YTQuantity "" ""or a (value, unit) tuple!"" ) from e angular_scale = dist / ds.quan(1.0, ""radian"") dist_fac = ds.quan( 1.0 / (4.0 * np.pi * dist * dist * angular_scale * angular_scale).v, ""rad**-2"", ) ei_name = (ftype, ""xray_intensity_%s_%s_keV"" % (e_min, e_max)) def _intensity_field(field, data): I = dist_fac * data[emiss_name] return I.in_units(""erg/cm**3/s/arcsec**2"") ds.add_field( ei_name, function=_intensity_field, display_name=r""I_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""erg/cm**3/s/arcsec**2"", ) i_name = (ftype, ""xray_photon_intensity_%s_%s_keV"" % (e_min, e_max)) def _photon_intensity_field(field, data): I = (1.0 + redshift) * dist_fac * data[phot_name] return I.in_units(""photons/cm**3/s/arcsec**2"") ds.add_field( i_name, function=_photon_intensity_field, display_name=r""I_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""photons/cm**3/s/arcsec**2"", ) fields += [ei_name, i_name] [mylog.info(""Adding ('%s','%s') field."" % field) for field in fields] return fields ","def add_xray_emissivity_field( ds, e_min, e_max, redshift=0.0, metallicity=(""gas"", ""metallicity""), table_type=""cloudy"", data_dir=None, cosmology=None, dist=None, ftype=""gas"", ): r""""""Create X-ray emissivity fields for a given energy range. Parameters ---------- e_min : float The minimum energy in keV for the energy band. e_min : float The maximum energy in keV for the energy band. redshift : float, optional The cosmological redshift of the source of the field. Default: 0.0. metallicity : str or tuple of str or float, optional Either the name of a metallicity field or a single floating-point number specifying a spatially constant metallicity. Must be in solar units. If set to None, no metals will be assumed. Default: (""gas"", ""metallicity"") table_type : string, optional The type of emissivity table to be used when creating the fields. Options are ""cloudy"" or ""apec"". Default: ""cloudy"" data_dir : string, optional The location to look for the data table in. If not supplied, the file will be looked for in the location of the YT_DEST environment variable or in the current working directory. cosmology : :class:`~yt.utilities.cosmology.Cosmology`, optional If set and redshift > 0.0, this cosmology will be used when computing the cosmological dependence of the emission fields. If not set, yt's default LCDM cosmology will be used. dist : (value, unit) tuple or :class:`~yt.units.yt_array.YTQuantity`, optional The distance to the source, used for making intensity fields. You should only use this if your source is nearby (not cosmological). Default: None ftype : string, optional The field type to use when creating the fields, default ""gas"" This will create at least three fields: ""xray_emissivity_{e_min}_{e_max}_keV"" (erg s^-1 cm^-3) ""xray_luminosity_{e_min}_{e_max}_keV"" (erg s^-1) ""xray_photon_emissivity_{e_min}_{e_max}_keV"" (photons s^-1 cm^-3) and if a redshift or distance is specified it will create two others: ""xray_intensity_{e_min}_{e_max}_keV"" (erg s^-1 cm^-3 arcsec^-2) ""xray_photon_intensity_{e_min}_{e_max}_keV"" (photons s^-1 cm^-3 arcsec^-2) These latter two are really only useful when making projections. 
Examples -------- >>> import yt >>> ds = yt.load(""sloshing_nomag2_hdf5_plt_cnt_0100"") >>> yt.add_xray_emissivity_field(ds, 0.5, 2) >>> p = yt.ProjectionPlot(ds, 'x', (""gas"",""xray_emissivity_0.5_2_keV""), ... table_type='apec') >>> p.save() """""" if not isinstance(metallicity, float) and metallicity is not None: try: metallicity = ds._get_field_info(*metallicity) except YTFieldNotFound as e: raise RuntimeError( ""Your dataset does not have a {} field! "".format(metallicity) + ""Perhaps you should specify a constant metallicity instead?"" ) from e if table_type == ""cloudy"": # Cloudy wants to scale by nH**2 other_n = ""H_nuclei_density"" else: # APEC wants to scale by nH*ne other_n = ""El_number_density"" def _norm_field(field, data): return data[ftype, ""H_nuclei_density""] * data[ftype, other_n] ds.add_field( (ftype, ""norm_field""), _norm_field, units=""cm**-6"", sampling_type=""local"" ) my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir, redshift=redshift) em_0 = my_si.get_interpolator(""primordial"", e_min, e_max) emp_0 = my_si.get_interpolator(""primordial"", e_min, e_max, energy=False) if metallicity is not None: em_Z = my_si.get_interpolator(""metals"", e_min, e_max) emp_Z = my_si.get_interpolator(""metals"", e_min, e_max, energy=False) def _emissivity_field(field, data): with np.errstate(all=""ignore""): dd = { ""log_nH"": np.log10(data[ftype, ""H_nuclei_density""]), ""log_T"": np.log10(data[ftype, ""temperature""]), } my_emissivity = np.power(10, em_0(dd)) if metallicity is not None: if isinstance(metallicity, DerivedField): my_Z = data[metallicity.name].to(""Zsun"") else: my_Z = metallicity my_emissivity += my_Z * np.power(10, em_Z(dd)) my_emissivity[np.isnan(my_emissivity)] = 0 return data[ftype, ""norm_field""] * YTArray(my_emissivity, ""erg*cm**3/s"") emiss_name = (ftype, ""xray_emissivity_%s_%s_keV"" % (e_min, e_max)) ds.add_field( emiss_name, function=_emissivity_field, display_name=r""\epsilon_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""erg/cm**3/s"", ) def _luminosity_field(field, data): return data[emiss_name] * data[ftype, ""mass""] / data[ftype, ""density""] lum_name = (ftype, ""xray_luminosity_%s_%s_keV"" % (e_min, e_max)) ds.add_field( lum_name, function=_luminosity_field, display_name=r""\rm{L}_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""erg/s"", ) def _photon_emissivity_field(field, data): dd = { ""log_nH"": np.log10(data[ftype, ""H_nuclei_density""]), ""log_T"": np.log10(data[ftype, ""temperature""]), } my_emissivity = np.power(10, emp_0(dd)) if metallicity is not None: if isinstance(metallicity, DerivedField): my_Z = data[metallicity.name].to(""Zsun"") else: my_Z = metallicity my_emissivity += my_Z * np.power(10, emp_Z(dd)) return data[ftype, ""norm_field""] * YTArray(my_emissivity, ""photons*cm**3/s"") phot_name = (ftype, ""xray_photon_emissivity_%s_%s_keV"" % (e_min, e_max)) ds.add_field( phot_name, function=_photon_emissivity_field, display_name=r""\epsilon_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""photons/cm**3/s"", ) fields = [emiss_name, lum_name, phot_name] if redshift > 0.0 or dist is not None: if dist is None: if cosmology is None: if hasattr(ds, ""cosmology""): cosmology = ds.cosmology else: cosmology = Cosmology() D_L = cosmology.luminosity_distance(0.0, redshift) angular_scale = 1.0 / cosmology.angular_scale(0.0, redshift) dist_fac = ds.quan( 1.0 / (4.0 * np.pi * D_L * D_L * angular_scale * angular_scale).v, ""rad**-2"", ) else: redshift = 0.0 # Only for 
local sources! try: # normal behaviour, if dist is a YTQuantity dist = ds.quan(dist.value, dist.units) except AttributeError as e: try: dist = ds.quan(*dist) except (RuntimeError, TypeError): raise TypeError( ""dist should be a YTQuantity or a (value, unit) tuple!"" ) from e angular_scale = dist / ds.quan(1.0, ""radian"") dist_fac = ds.quan( 1.0 / (4.0 * np.pi * dist * dist * angular_scale * angular_scale).v, ""rad**-2"", ) ei_name = (ftype, ""xray_intensity_%s_%s_keV"" % (e_min, e_max)) def _intensity_field(field, data): I = dist_fac * data[emiss_name] return I.in_units(""erg/cm**3/s/arcsec**2"") ds.add_field( ei_name, function=_intensity_field, display_name=r""I_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""erg/cm**3/s/arcsec**2"", ) i_name = (ftype, ""xray_photon_intensity_%s_%s_keV"" % (e_min, e_max)) def _photon_intensity_field(field, data): I = (1.0 + redshift) * dist_fac * data[phot_name] return I.in_units(""photons/cm**3/s/arcsec**2"") ds.add_field( i_name, function=_photon_intensity_field, display_name=r""I_{X} (%s-%s keV)"" % (e_min, e_max), sampling_type=""local"", units=""photons/cm**3/s/arcsec**2"", ) fields += [ei_name, i_name] [mylog.info(""Adding ('%s','%s') field."" % field) for field in fields] return fields " 38723,"def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--dont-restage', action='store_false', dest='clean_stagedir', help='Reuse the test stage directory', envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', 
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) output_options.add_argument( '--report-file', action='store', metavar='FILE', help=""Store JSON run report in FILE"", envvar='RFM_REPORT_FILE', configvar='general/report_file' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '--failed', action='store_true', help=""Select failed test cases (only when '--restore-session' is used)"" ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List the selected checks' ) action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List the selected checks providing details for each test' ) action_options.add_argument( '--list-tags', action='store_true', help='List the unique tags found in the selected tests' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) action_options.add_argument( '--ci-generate', action='store', metavar='FILE', help=('Generate into FILE a Gitlab CI pipeline ' 'for the selected tests and exit'), ) # Run options run_options.add_argument( '-J', '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) 
run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) run_options.add_argument( '--maxfail', metavar='NUM', action='store', default=sys.maxsize, help='Exit after first NUM failures' ) run_options.add_argument( '--restore-session', action='store', nargs='?', const='', metavar='REPORT', help='Restore a testing session from REPORT file' ) run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' ) run_options.add_argument( '--disable-hook', action='append', metavar='NAME', dest='hooks', default=[], help='Disable a pipeline hook for this run' ) # Environment options env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--module-path', action='append', metavar='PATH', dest='module_paths', default=[], help='(Un)use module path PATH before running any regression check', ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--upgrade-config-file', action='store', metavar='OLD[:NEW]', help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax' ) misc_options.add_argument( '-V', '--version', action='version', 
version=osext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_ADDRESS', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) argparser.add_argument( dest='syslog_address', envvar='RFM_SYSLOG_ADDRESS', configvar='logging/handlers_perflog/syslog_address', help='Syslog server address' ) argparser.add_argument( dest='ignore_reqnodenotavail', envvar='RFM_IGNORE_REQNODENOTAVAIL', configvar='schedulers/ignore_reqnodenotavail', action='store_true', help='Graylog server address' ) argparser.add_argument( dest='use_login_shell', envvar='RFM_USE_LOGIN_SHELL', configvar='general/use_login_shell', action='store_true', help='Use a login shell for job scripts' ) # Parse command line options = argparser.parse_args() if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' 'please use RFM_GRAYLOG_ADDRESS instead' ) os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER') if options.upgrade_config_file is not None: old_config, *new_config = options.upgrade_config_file.split( ':', maxsplit=1 ) new_config = new_config[0] if new_config else None try: new_config = config.convert_old_config(old_config, new_config) except Exception as e: printer.error(f'could not convert file: {e}') sys.exit(1) printer.info( f'Conversion successful! ' f'The converted file can be found at {new_config!r}.' ) sys.exit(0) # Now configure ReFrame according to the user configuration file try: try: printer.debug('Loading user configuration') site_config = config.load_config(options.config_file) except warnings.ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() # We ignore errors about unresolved sections or configuration # parameters here, because they might be defined at the individual # partition level and will be caught when we will instantiating # internally the system and partitions later on. 
site_config.select_subconfig(options.system, ignore_resolve_errors=True) for err in options.update_config(site_config): printer.warning(str(err)) # Update options from the selected execution mode if options.mode: mode_args = site_config.get(f'modes/@{options.mode}/options') # We lexically split the mode options, because otherwise spaces # will be treated as part of the option argument; see GH bug #1554 mode_args = list(itertools.chain.from_iterable(shlex.split(m) for m in mode_args)) # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(site_config) logging.configure_logging(site_config) except (OSError, errors.ConfigError) as e: printer.error(f'failed to load configuration: {e}') printer.error(logfiles_message()) sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: printer.debug('Initializing runtime') runtime.init_runtime(site_config) except errors.ConfigError as e: printer.error(f'failed to initialize runtime: {e}') printer.error(logfiles_message()) sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (errors.ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if (osext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") printer.error(logfiles_message()) sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader if options.restore_session is not None: # We need to load the failed checks only from a report if options.restore_session: filename = options.restore_session else: filename = runreport.next_report_filename( osext.expandvars(site_config.get('general/0/report_file')), new=False ) report = runreport.load_report(filename) check_search_path = list(report.slice('filename', unique=True)) check_search_recursive = False # If `-c` or `-R` are passed explicitly outside the configuration # file, override the values set from the report file if site_config.is_sticky_option('general/check_search_path'): printer.warning( 'Ignoring check search path set in the report file: ' 'search path set explicitly in the command-line or ' 'the environment' ) check_search_path = site_config.get( 'general/0/check_search_path' ) if site_config.is_sticky_option('general/check_search_recursive'): printer.warning( 'Ignoring check search recursive option from the report file: ' 'option set explicitly in the command-line or the environment' ) check_search_recursive = site_config.get( 'general/0/check_search_recursive' ) else: check_search_recursive = site_config.get( 
'general/0/check_search_recursive' ) check_search_path = site_config.get('general/0/check_search_path') loader = RegressionCheckLoader( load_path=check_search_path, recurse=check_search_recursive, ignore_conflicts=site_config.get( 'general/0/ignore_check_conflicts' ) ) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") session_info = { 'cmdline': ' '.join(sys.argv), 'config_file': rt.site_config.filename, 'data_version': runreport.DATA_VERSION, 'hostname': socket.gethostname(), 'prefix_output': rt.output_prefix, 'prefix_stage': rt.stage_prefix, 'user': osext.osuser(), 'version': osext.reframe_version(), 'workdir': os.getcwd(), } # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', session_info['version']) print_infoline('command', repr(session_info['cmdline'])) print_infoline( f""launched by"", f""{session_info['user'] or ''}@{session_info['hostname']}"" ) print_infoline('working directory', repr(session_info['workdir'])) print_infoline('settings file', f""{session_info['config_file']!r}"") print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(session_info['prefix_stage'])) print_infoline('output directory', repr(session_info['prefix_output'])) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() printer.verbose(f'Loaded {len(checks_found)} test(s)') except OSError as e: raise errors.ReframeError from e # Generate all possible test cases first; we will need them for # resolving dependencies after filtering # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} testcases_all = generate_testcases(checks_found, options.skip_system_check, options.skip_prgenv_check, allowed_environs) testcases = testcases_all printer.verbose(f'Generated {len(testcases)} test case(s)') # Filter test cases by name if options.exclude_names: for name in options.exclude_names: testcases = filter(filters.have_not_name(name), testcases) if options.names: testcases = filter( filters.have_name('|'.join(options.names)), testcases ) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by name: {len(testcases)} remaining' ) # Filter test cases by tags for tag in options.tags: testcases = filter(filters.have_tag(tag), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by tags: {len(testcases)} remaining' ) # Filter test cases further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: testcases = filter(filters.have_gpu_only(), testcases) elif options.cpu_only: testcases = filter(filters.have_cpu_only(), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by other attributes: ' f'{len(testcases)} remaining' ) # Filter in failed cases if options.failed: if options.restore_session is None: printer.error( ""the option '--failed' can only be used "" ""in combination with the '--restore-session' option"" ) sys.exit(1) def _case_failed(t): rec = report.case(*t) if not rec: return False return (rec['result'] == 'failure' or rec['result'] == 'aborted') testcases = list(filter(_case_failed, testcases)) printer.verbose( f'Filtering successful test case(s): ' f'{len(testcases)} remaining' ) # Prepare for running 
printer.debug('Building and validating the full test DAG') testgraph, skipped_cases = dependencies.build_deps(testcases_all) if skipped_cases: # Some cases were skipped, so adjust testcases testcases = list(set(testcases) - set(skipped_cases)) printer.verbose( f'Filtering test case(s) due to unresolved dependencies: ' f'{len(testcases)} remaining' ) dependencies.validate_deps(testgraph) printer.debug('Full test DAG:') printer.debug(dependencies.format_deps(testgraph)) restored_cases = [] if len(testcases) != len(testcases_all): testgraph = dependencies.prune_deps( testgraph, testcases, max_depth=1 if options.restore_session is not None else None ) printer.debug('Pruned test DAG') printer.debug(dependencies.format_deps(testgraph)) if options.restore_session is not None: testgraph, restored_cases = report.restore_dangling(testgraph) testcases = dependencies.toposort( testgraph, is_subgraph=options.restore_session is not None ) printer.verbose(f'Final number of test cases: {len(testcases)}') # Disable hooks for tc in testcases: for h in options.hooks: tc.check.disable_hook(h) # Act on checks if options.list or options.list_detailed: list_checks(testcases, printer, options.list_detailed) sys.exit(0) if options.list_tags: list_tags(testcases, printer) sys.exit(0) if options.ci_generate: list_checks(testcases, printer) printer.info('[Generate CI]') with open(options.ci_generate, 'wt') as fp: ci.emit_pipeline(fp, testcases) printer.info( f' Gitlab pipeline generated successfully ' f'in {options.ci_generate!r}.\n' ) sys.exit(0) if not options.run: printer.error(""No action option specified. Available options:\n"" "" - `-l'/`-L' for listing\n"" "" - `-r' for running\n"" "" - `--ci-generate' for generating a CI pipeline\n"" f""Try `{argparser.prog} -h' for more options."") sys.exit(1) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(**m) # Load the environment for the current system try: printer.debug(f'Loading environment for current system') runtime.loadenv(rt.system.preload_environ) except errors.EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise def module_use(*paths): try: rt.modules_system.searchpath_add(*paths) except errors.EnvironError as e: printer.warning(f'could not add module paths correctly') printer.debug(str(e)) def module_unuse(*paths): try: rt.modules_system.searchpath_remove(*paths) except errors.EnvironError as e: printer.warning(f'could not remove module paths correctly') printer.debug(str(e)) printer.debug('(Un)using module paths from command line') module_paths = {} for d in options.module_paths: if d.startswith('-'): module_paths.setdefault('-', []) module_paths['-'].append(d[1:]) elif d.startswith('+'): module_paths.setdefault('+', []) module_paths['+'].append(d[1:]) else: module_paths.setdefault('x', []) module_paths['x'].append(d) for op, paths in module_paths.items(): if op == '+': module_use(*paths) elif op == '-': module_unuse(*paths) else: # First empty the current module path in a portable way searchpath = [p for p in rt.modules_system.searchpath if p] if searchpath: rt.modules_system.searchpath_remove(*searchpath) # Treat `A:B` syntax as well in this case paths = itertools.chain(*(p.split(':') for p in paths)) module_use(*paths) printer.debug('Loading user modules from command line') for m in 
site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(**m, force=True) except errors.EnvironError as e: printer.warning( f'could not load module {m[""name""]!r} correctly; ' f'skipping...' ) printer.debug(str(e)) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Run the tests # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise errors.ConfigError( errmsg.format(options.flex_alloc_nodes) ) except ValueError: sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes parsed_job_options = [] for opt in options.job_options: opt_split = opt.split('=', maxsplit=1) optstr = opt_split[0] valstr = opt_split[1] if len(opt_split) > 1 else '' if opt.startswith('-') or opt.startswith('#'): parsed_job_options.append(opt) elif len(optstr) == 1: parsed_job_options.append(f'-{optstr} {valstr}') else: parsed_job_options.append(f'--{optstr} {valstr}') exec_policy.sched_options = parsed_job_options try: max_retries = int(options.max_retries) except ValueError: raise errors.ConfigError( f'--max-retries is not a valid integer: {max_retries}' ) from None try: max_failures = int(options.maxfail) if max_failures < 0: raise errors.ConfigError( f'--maxfail should be a non-negative integer: ' f'{options.maxfail!r}' ) except ValueError: raise errors.ConfigError( f'--maxfail is not a valid integer: {options.maxfail!r}' ) from None runner = Runner(exec_policy, printer, max_retries, max_failures) try: time_start = time.time() session_info['time_start'] = time.strftime( '%FT%T%z', time.localtime(time_start), ) runner.runall(testcases, restored_cases) finally: time_end = time.time() session_info['time_end'] = time.strftime( '%FT%T%z', time.localtime(time_end) ) session_info['time_elapsed'] = time_end - time_start # Print a retry report if we did any retries if runner.stats.failed(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run success = True if runner.stats.failed(): success = False runner.stats.print_failure_report(printer) if options.failure_stats: runner.stats.print_failure_stats(printer) if options.performance_report: printer.info(runner.stats.performance_report()) # Generate the report for this session report_file = os.path.normpath( osext.expandvars(rt.get_option('general/0/report_file')) ) basedir = os.path.dirname(report_file) if basedir: os.makedirs(basedir, exist_ok=True) # Build final JSON report run_stats = runner.stats.json() session_info.update({ 'num_cases': run_stats[0]['num_cases'], 'num_failures': run_stats[-1]['num_failures'] }) json_report = { 'session_info': session_info, 'runs': run_stats, 'restored_cases': [] } if options.restore_session is not None: for c in restored_cases: 
json_report['restored_cases'].append(report.case(*c)) report_file = runreport.next_report_filename(report_file) try: with open(report_file, 'w') as fp: jsonext.dump(json_report, fp, indent=2) fp.write('\n') except OSError as e: printer.warning( f'failed to generate report in {report_file!r}: {e}' ) if not success: sys.exit(1) sys.exit(0) except (Exception, KeyboardInterrupt, errors.ReframeFatalError): exc_info = sys.exc_info() tb = ''.join(traceback.format_exception(*exc_info)) printer.error(f'run session stopped: {errors.what(*exc_info)}') if errors.is_exit_request(*exc_info): # Print stack traces for exit requests only when TOO verbose printer.debug2(tb) elif errors.is_severe(*exc_info): printer.error(tb) else: printer.verbose(tb) sys.exit(1) finally: try: log_files = logging.log_files() if site_config.get('general/0/save_log_files'): log_files = logging.save_log_files(rt.output_prefix) except OSError as e: printer.error(f'could not save log file: {e}') sys.exit(1) finally: printer.info(logfiles_message()) ","def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--dont-restage', action='store_false', dest='clean_stagedir', help='Reuse the test stage directory', envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) output_options.add_argument( '--report-file', action='store', metavar='FILE', help=""Store JSON run report in FILE"", envvar='RFM_REPORT_FILE', configvar='general/report_file' ) # Check discovery 
options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '--failed', action='store_true', help=""Select failed test cases (only when '--restore-session' is used)"" ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List the selected checks' ) action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List the selected checks providing details for each test' ) action_options.add_argument( '--list-tags', action='store_true', help='List the unique tags found in the selected tests and exit' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) action_options.add_argument( '--ci-generate', action='store', metavar='FILE', help=('Generate into FILE a Gitlab CI pipeline ' 'for the selected tests and exit'), ) # Run options run_options.add_argument( '-J', '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 
0)' ) run_options.add_argument( '--maxfail', metavar='NUM', action='store', default=sys.maxsize, help='Exit after first NUM failures' ) run_options.add_argument( '--restore-session', action='store', nargs='?', const='', metavar='REPORT', help='Restore a testing session from REPORT file' ) run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' ) run_options.add_argument( '--disable-hook', action='append', metavar='NAME', dest='hooks', default=[], help='Disable a pipeline hook for this run' ) # Environment options env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--module-path', action='append', metavar='PATH', dest='module_paths', default=[], help='(Un)use module path PATH before running any regression check', ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--upgrade-config-file', action='store', metavar='OLD[:NEW]', help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax' ) misc_options.add_argument( '-V', '--version', action='version', version=osext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( 
dest='graylog_server', envvar='RFM_GRAYLOG_ADDRESS', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) argparser.add_argument( dest='syslog_address', envvar='RFM_SYSLOG_ADDRESS', configvar='logging/handlers_perflog/syslog_address', help='Syslog server address' ) argparser.add_argument( dest='ignore_reqnodenotavail', envvar='RFM_IGNORE_REQNODENOTAVAIL', configvar='schedulers/ignore_reqnodenotavail', action='store_true', help='Graylog server address' ) argparser.add_argument( dest='use_login_shell', envvar='RFM_USE_LOGIN_SHELL', configvar='general/use_login_shell', action='store_true', help='Use a login shell for job scripts' ) # Parse command line options = argparser.parse_args() if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' 'please use RFM_GRAYLOG_ADDRESS instead' ) os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER') if options.upgrade_config_file is not None: old_config, *new_config = options.upgrade_config_file.split( ':', maxsplit=1 ) new_config = new_config[0] if new_config else None try: new_config = config.convert_old_config(old_config, new_config) except Exception as e: printer.error(f'could not convert file: {e}') sys.exit(1) printer.info( f'Conversion successful! ' f'The converted file can be found at {new_config!r}.' ) sys.exit(0) # Now configure ReFrame according to the user configuration file try: try: printer.debug('Loading user configuration') site_config = config.load_config(options.config_file) except warnings.ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() # We ignore errors about unresolved sections or configuration # parameters here, because they might be defined at the individual # partition level and will be caught when we will instantiating # internally the system and partitions later on. 
site_config.select_subconfig(options.system, ignore_resolve_errors=True) for err in options.update_config(site_config): printer.warning(str(err)) # Update options from the selected execution mode if options.mode: mode_args = site_config.get(f'modes/@{options.mode}/options') # We lexically split the mode options, because otherwise spaces # will be treated as part of the option argument; see GH bug #1554 mode_args = list(itertools.chain.from_iterable(shlex.split(m) for m in mode_args)) # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(site_config) logging.configure_logging(site_config) except (OSError, errors.ConfigError) as e: printer.error(f'failed to load configuration: {e}') printer.error(logfiles_message()) sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: printer.debug('Initializing runtime') runtime.init_runtime(site_config) except errors.ConfigError as e: printer.error(f'failed to initialize runtime: {e}') printer.error(logfiles_message()) sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (errors.ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if (osext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") printer.error(logfiles_message()) sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader if options.restore_session is not None: # We need to load the failed checks only from a report if options.restore_session: filename = options.restore_session else: filename = runreport.next_report_filename( osext.expandvars(site_config.get('general/0/report_file')), new=False ) report = runreport.load_report(filename) check_search_path = list(report.slice('filename', unique=True)) check_search_recursive = False # If `-c` or `-R` are passed explicitly outside the configuration # file, override the values set from the report file if site_config.is_sticky_option('general/check_search_path'): printer.warning( 'Ignoring check search path set in the report file: ' 'search path set explicitly in the command-line or ' 'the environment' ) check_search_path = site_config.get( 'general/0/check_search_path' ) if site_config.is_sticky_option('general/check_search_recursive'): printer.warning( 'Ignoring check search recursive option from the report file: ' 'option set explicitly in the command-line or the environment' ) check_search_recursive = site_config.get( 'general/0/check_search_recursive' ) else: check_search_recursive = site_config.get( 
'general/0/check_search_recursive' ) check_search_path = site_config.get('general/0/check_search_path') loader = RegressionCheckLoader( load_path=check_search_path, recurse=check_search_recursive, ignore_conflicts=site_config.get( 'general/0/ignore_check_conflicts' ) ) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") session_info = { 'cmdline': ' '.join(sys.argv), 'config_file': rt.site_config.filename, 'data_version': runreport.DATA_VERSION, 'hostname': socket.gethostname(), 'prefix_output': rt.output_prefix, 'prefix_stage': rt.stage_prefix, 'user': osext.osuser(), 'version': osext.reframe_version(), 'workdir': os.getcwd(), } # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', session_info['version']) print_infoline('command', repr(session_info['cmdline'])) print_infoline( f""launched by"", f""{session_info['user'] or ''}@{session_info['hostname']}"" ) print_infoline('working directory', repr(session_info['workdir'])) print_infoline('settings file', f""{session_info['config_file']!r}"") print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(session_info['prefix_stage'])) print_infoline('output directory', repr(session_info['prefix_output'])) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() printer.verbose(f'Loaded {len(checks_found)} test(s)') except OSError as e: raise errors.ReframeError from e # Generate all possible test cases first; we will need them for # resolving dependencies after filtering # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} testcases_all = generate_testcases(checks_found, options.skip_system_check, options.skip_prgenv_check, allowed_environs) testcases = testcases_all printer.verbose(f'Generated {len(testcases)} test case(s)') # Filter test cases by name if options.exclude_names: for name in options.exclude_names: testcases = filter(filters.have_not_name(name), testcases) if options.names: testcases = filter( filters.have_name('|'.join(options.names)), testcases ) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by name: {len(testcases)} remaining' ) # Filter test cases by tags for tag in options.tags: testcases = filter(filters.have_tag(tag), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by tags: {len(testcases)} remaining' ) # Filter test cases further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: testcases = filter(filters.have_gpu_only(), testcases) elif options.cpu_only: testcases = filter(filters.have_cpu_only(), testcases) testcases = list(testcases) printer.verbose( f'Filtering test cases(s) by other attributes: ' f'{len(testcases)} remaining' ) # Filter in failed cases if options.failed: if options.restore_session is None: printer.error( ""the option '--failed' can only be used "" ""in combination with the '--restore-session' option"" ) sys.exit(1) def _case_failed(t): rec = report.case(*t) if not rec: return False return (rec['result'] == 'failure' or rec['result'] == 'aborted') testcases = list(filter(_case_failed, testcases)) printer.verbose( f'Filtering successful test case(s): ' f'{len(testcases)} remaining' ) # Prepare for running 
printer.debug('Building and validating the full test DAG') testgraph, skipped_cases = dependencies.build_deps(testcases_all) if skipped_cases: # Some cases were skipped, so adjust testcases testcases = list(set(testcases) - set(skipped_cases)) printer.verbose( f'Filtering test case(s) due to unresolved dependencies: ' f'{len(testcases)} remaining' ) dependencies.validate_deps(testgraph) printer.debug('Full test DAG:') printer.debug(dependencies.format_deps(testgraph)) restored_cases = [] if len(testcases) != len(testcases_all): testgraph = dependencies.prune_deps( testgraph, testcases, max_depth=1 if options.restore_session is not None else None ) printer.debug('Pruned test DAG') printer.debug(dependencies.format_deps(testgraph)) if options.restore_session is not None: testgraph, restored_cases = report.restore_dangling(testgraph) testcases = dependencies.toposort( testgraph, is_subgraph=options.restore_session is not None ) printer.verbose(f'Final number of test cases: {len(testcases)}') # Disable hooks for tc in testcases: for h in options.hooks: tc.check.disable_hook(h) # Act on checks if options.list or options.list_detailed: list_checks(testcases, printer, options.list_detailed) sys.exit(0) if options.list_tags: list_tags(testcases, printer) sys.exit(0) if options.ci_generate: list_checks(testcases, printer) printer.info('[Generate CI]') with open(options.ci_generate, 'wt') as fp: ci.emit_pipeline(fp, testcases) printer.info( f' Gitlab pipeline generated successfully ' f'in {options.ci_generate!r}.\n' ) sys.exit(0) if not options.run: printer.error(""No action option specified. Available options:\n"" "" - `-l'/`-L' for listing\n"" "" - `-r' for running\n"" "" - `--ci-generate' for generating a CI pipeline\n"" f""Try `{argparser.prog} -h' for more options."") sys.exit(1) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(**m) # Load the environment for the current system try: printer.debug(f'Loading environment for current system') runtime.loadenv(rt.system.preload_environ) except errors.EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise def module_use(*paths): try: rt.modules_system.searchpath_add(*paths) except errors.EnvironError as e: printer.warning(f'could not add module paths correctly') printer.debug(str(e)) def module_unuse(*paths): try: rt.modules_system.searchpath_remove(*paths) except errors.EnvironError as e: printer.warning(f'could not remove module paths correctly') printer.debug(str(e)) printer.debug('(Un)using module paths from command line') module_paths = {} for d in options.module_paths: if d.startswith('-'): module_paths.setdefault('-', []) module_paths['-'].append(d[1:]) elif d.startswith('+'): module_paths.setdefault('+', []) module_paths['+'].append(d[1:]) else: module_paths.setdefault('x', []) module_paths['x'].append(d) for op, paths in module_paths.items(): if op == '+': module_use(*paths) elif op == '-': module_unuse(*paths) else: # First empty the current module path in a portable way searchpath = [p for p in rt.modules_system.searchpath if p] if searchpath: rt.modules_system.searchpath_remove(*searchpath) # Treat `A:B` syntax as well in this case paths = itertools.chain(*(p.split(':') for p in paths)) module_use(*paths) printer.debug('Loading user modules from command line') for m in 
site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(**m, force=True) except errors.EnvironError as e: printer.warning( f'could not load module {m[""name""]!r} correctly; ' f'skipping...' ) printer.debug(str(e)) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Run the tests # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise errors.ConfigError( errmsg.format(options.flex_alloc_nodes) ) except ValueError: sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes parsed_job_options = [] for opt in options.job_options: opt_split = opt.split('=', maxsplit=1) optstr = opt_split[0] valstr = opt_split[1] if len(opt_split) > 1 else '' if opt.startswith('-') or opt.startswith('#'): parsed_job_options.append(opt) elif len(optstr) == 1: parsed_job_options.append(f'-{optstr} {valstr}') else: parsed_job_options.append(f'--{optstr} {valstr}') exec_policy.sched_options = parsed_job_options try: max_retries = int(options.max_retries) except ValueError: raise errors.ConfigError( f'--max-retries is not a valid integer: {max_retries}' ) from None try: max_failures = int(options.maxfail) if max_failures < 0: raise errors.ConfigError( f'--maxfail should be a non-negative integer: ' f'{options.maxfail!r}' ) except ValueError: raise errors.ConfigError( f'--maxfail is not a valid integer: {options.maxfail!r}' ) from None runner = Runner(exec_policy, printer, max_retries, max_failures) try: time_start = time.time() session_info['time_start'] = time.strftime( '%FT%T%z', time.localtime(time_start), ) runner.runall(testcases, restored_cases) finally: time_end = time.time() session_info['time_end'] = time.strftime( '%FT%T%z', time.localtime(time_end) ) session_info['time_elapsed'] = time_end - time_start # Print a retry report if we did any retries if runner.stats.failed(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run success = True if runner.stats.failed(): success = False runner.stats.print_failure_report(printer) if options.failure_stats: runner.stats.print_failure_stats(printer) if options.performance_report: printer.info(runner.stats.performance_report()) # Generate the report for this session report_file = os.path.normpath( osext.expandvars(rt.get_option('general/0/report_file')) ) basedir = os.path.dirname(report_file) if basedir: os.makedirs(basedir, exist_ok=True) # Build final JSON report run_stats = runner.stats.json() session_info.update({ 'num_cases': run_stats[0]['num_cases'], 'num_failures': run_stats[-1]['num_failures'] }) json_report = { 'session_info': session_info, 'runs': run_stats, 'restored_cases': [] } if options.restore_session is not None: for c in restored_cases: 
json_report['restored_cases'].append(report.case(*c)) report_file = runreport.next_report_filename(report_file) try: with open(report_file, 'w') as fp: jsonext.dump(json_report, fp, indent=2) fp.write('\n') except OSError as e: printer.warning( f'failed to generate report in {report_file!r}: {e}' ) if not success: sys.exit(1) sys.exit(0) except (Exception, KeyboardInterrupt, errors.ReframeFatalError): exc_info = sys.exc_info() tb = ''.join(traceback.format_exception(*exc_info)) printer.error(f'run session stopped: {errors.what(*exc_info)}') if errors.is_exit_request(*exc_info): # Print stack traces for exit requests only when TOO verbose printer.debug2(tb) elif errors.is_severe(*exc_info): printer.error(tb) else: printer.verbose(tb) sys.exit(1) finally: try: log_files = logging.log_files() if site_config.get('general/0/save_log_files'): log_files = logging.save_log_files(rt.output_prefix) except OSError as e: printer.error(f'could not save log file: {e}') sys.exit(1) finally: printer.info(logfiles_message()) " 45170,"def Scheduled( cls: Type[State] = State, scheduled_time: datetime.datetime = None, **kwargs ) -> State: """"""Convenience function for creating `Scheduled` states. Returns: State: a Scheduled state """""" return schemas.states.Scheduled(cls=cls, scheduled_time=scheduled_time, **kwargs) ","def Scheduled( cls: Type[State] = State, scheduled_time: datetime.datetime = None, **kwargs ) -> State: """"""Convenience function for creating `Scheduled` states. Returns: State: a `Scheduled` state """""" return schemas.states.Scheduled(cls=cls, scheduled_time=scheduled_time, **kwargs) " 35246,"def polyder(p, m=1): """"""Returns the derivative of the specified order of a polynomial. Parameters ---------- p : poly1d or sequence Polynomial to differentiate m : int, optional Order of differentiation. By default, 1 Returns ------- der : poly1d A new polynomial representing the derivative. See Also -------- numpy.polyder """""" m = int(m) if m < 0: raise ValueError(""Order of derivative must be positive."") truepoly = isinstance(p, cupy.poly1d) p = cupy.asarray(p) n = len(p) - 1 y = p[:-1] * cupy.arange(n, 0, -1) if m == 0: val = p else: val = polyder(y, m - 1) if truepoly: val = cupy.poly1d(val) return val ","def polyder(p, m=1): """"""Returns the derivative of the specified order of a polynomial. Parameters ---------- p : poly1d or cupy.ndarray Polynomial to differentiate m : int, optional Order of differentiation. By default, 1 Returns ------- der : poly1d A new polynomial representing the derivative. 
See Also -------- numpy.polyder """""" m = int(m) if m < 0: raise ValueError(""Order of derivative must be positive."") truepoly = isinstance(p, cupy.poly1d) p = cupy.asarray(p) n = len(p) - 1 y = p[:-1] * cupy.arange(n, 0, -1) if m == 0: val = p else: val = polyder(y, m - 1) if truepoly: val = cupy.poly1d(val) return val " 30581,"def list_collections_command(client: Client, **kwargs) -> Tuple[str, dict, list]: raw_response = client.db.list_collection_names() if len(raw_response): readable_outputs = tableToMarkdown( 'MongoDB: All collections in database', raw_response, headers=['Collection'] ) outputs = { 'MongoDB.Collection(val.Name === obj.Name)': [ {'Name': collection} for collection in raw_response ] } return readable_outputs, outputs, raw_response else: return ""MongoDB: No results found"", {}, raw_response ","def list_collections_command(client: Client, **kwargs) -> Tuple[str, dict, list]: raw_response = client.db.list_collection_names() if len(raw_response): readable_outputs = tableToMarkdown( 'MongoDB: All collections in database', raw_response, headers=['Collection'] ) outputs = { 'MongoDB.Collection(val.Name === obj.Name)': [ {'Name': collection} for collection in raw_response ] } return readable_outputs, outputs, raw_response else: return 'MongoDB: No results found', {}, raw_response " 43048,"def TimeEvolution(t: float, w: np.ndarray): r""""""Generates a custom ``sf`` operation for performing the transformation :math:`e^{-i\hat{H}t/\hbar}` on a given state. The custom operation returned by this function can be used as part of a Strawberry Fields :class:`~.Program` just like any other operation from the :mod:`~.ops` module. **Example usage:** >>> modes = 2 >>> p = sf.Program(modes) >>> with p.context as q: >>> sf.ops.Fock(1) | q[0] >>> sf.ops.Interferometer(Ul.T) | q >>> TimeEvolution(t, w) | q >>> sf.ops.Interferometer(Ul) | q Args: t (float): time in femtoseconds w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` that compose the Hamiltonian :math:`\hat{H} = \sum_i \hbar \omega_i a_i^\dagger a_i` Returns: an ``sf`` operation for enacting the dynamics transformation Return type: op """""" # pylint: disable=expression-not-assigned n_modes = len(w) @operation(n_modes) def op(q): theta = -w * 100.0 * c * 1.0e-15 * t * (2.0 * pi) for i in range(n_modes): sf.ops.Rgate(theta[i]) | q[i] return op() ","def TimeEvolution(t: float, w: np.ndarray): r""""""Generates a custom ``sf`` operation for performing the transformation :math:`e^{-i\hat{H}t/\hbar}` on a given state. The custom operation returned by this function can be used as part of a Strawberry Fields other operation from the :mod:`~.ops` module. 
**Example usage:** >>> modes = 2 >>> p = sf.Program(modes) >>> with p.context as q: >>> sf.ops.Fock(1) | q[0] >>> sf.ops.Interferometer(Ul.T) | q >>> TimeEvolution(t, w) | q >>> sf.ops.Interferometer(Ul) | q Args: t (float): time in femtoseconds w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` that compose the Hamiltonian :math:`\hat{H} = \sum_i \hbar \omega_i a_i^\dagger a_i` Returns: an ``sf`` operation for enacting the dynamics transformation Return type: op """""" # pylint: disable=expression-not-assigned n_modes = len(w) @operation(n_modes) def op(q): theta = -w * 100.0 * c * 1.0e-15 * t * (2.0 * pi) for i in range(n_modes): sf.ops.Rgate(theta[i]) | q[i] return op() " 55805,"def remove_frameworks_with_unknown_parent(frameworks: Namespace): frameworks_with_unknown_parent = [ (name, framework.extends) for name, framework in frameworks if ""extends"" in framework and framework.extends not in frameworks ] for framework, parent in frameworks_with_unknown_parent: log.warning(""Removing framework %s as parent %s doesn't exist."", framework, parent) del frameworks[framework]","def remove_frameworks_with_unknown_parent(frameworks: Namespace): frameworks_with_unknown_parent = [ (name, framework.extends) for name, framework in frameworks if ""extends"" in framework and framework.extends not in frameworks ] for framework, parent in frameworks_with_unknown_parent: log.warning(""Removing framework %s as parent %s doesn't exist."", framework, parent) del frameworks[framework] " 30718,"def upload_files(excluded_files, dir_path, file_path): """""" :param excluded_files: excluded files :param dir_path: dir path for the files :param file_path: the path to the pcap file :return: """""" filenames = [] # type: ignore # recursive call over the file system top down for root, directories, files in os.walk(dir_path): for f in files: # skipping previously existing files # adding it to the extracted pcap files list if f not in excluded_files and isfile(os.path.join(root, f)): filenames.append(os.path.join(root, f)) if len(filenames) == 0: return_error('Could not find files') else: results = [] context = [] protocol, packet_data = find_files_protocol(file_path) md5 = hashlib.md5() sha1 = hashlib.sha1() sha256 = hashlib.sha256() files_base_names = [os.path.basename(file_path) for file_path in filenames] # noqa[F812] files_dic = {file_path: os.path.basename(file_path) for file_path in filenames} for file_path, file_name in files_dic.items(): for data in packet_data: packet_number = data.split()[0] for packet_number in packet_data: data = [i for i in packet_number.split()] source_ip = data[2] dest_ip = data[4] with open(file_path, 'rb') as _file: demisto.results(fileResult(file_name, _file.read())) with open(file_path, 'rb') as _file: data = _file.read() md5.update(data) sha1.update(data) sha256.update(data) context.append({ 'FileMD5': md5.hexdigest(), 'FileSHA1': sha1.hexdigest(), 'FileSHA256': sha256.hexdigest(), 'FileName': file_name, 'FileSize': os.path.getsize(file_path), 'DetectedInProtocol': protocol, 'FileExtension': os.path.splitext(file_name)[1], 'SourceIP': source_ip, 'DestinationIP': dest_ip }) ec = { 'PcapExtractedFiles(val.FileMD5 === obj.FileMD5)': context } results.append( { 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': {'extractedFiles': files_base_names}, 'EntryContext': ec, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Pcap Extracted Files', [{'name': file_name} for file_name in files_base_names]) 
}) demisto.results(results) ","def upload_files(excluded_files, dir_path, file_path): """""" :param excluded_files: excluded files :param dir_path: dir path for the files :param file_path: the path to the pcap file :return: """""" filenames = [] # type: ignore # recursive call over the file system top down for root, directories, files in os.walk(dir_path): for f in files: # skipping previously existing files # adding it to the extracted pcap files list if f not in excluded_files and isfile(os.path.join(root, f)): filenames.append(os.path.join(root, f)) if len(filenames) == 0: return_error('Could not find files') else: results = [] context = [] protocol, packet_data = find_files_protocol(file_path) md5 = hashlib.md5() sha1 = hashlib.sha1() sha256 = hashlib.sha256() files_base_names = [os.path.basename(file_path) for file_path in filenames] # noqa[F812] files_dic = {file_path: os.path.basename(file_path) for file_path in filenames} for file_path, file_name in files_dic.items(): for data in packet_data: packet_number = data.split()[0] for packet_number in packet_data: data = [i for i in packet_number.split()] source_ip = data[2] dest_ip = data[4] with open(file_path, 'rb') as file: demisto.results(fileResult(file_name, _file.read())) with open(file_path, 'rb') as _file: data = _file.read() md5.update(data) sha1.update(data) sha256.update(data) context.append({ 'FileMD5': md5.hexdigest(), 'FileSHA1': sha1.hexdigest(), 'FileSHA256': sha256.hexdigest(), 'FileName': file_name, 'FileSize': os.path.getsize(file_path), 'DetectedInProtocol': protocol, 'FileExtension': os.path.splitext(file_name)[1], 'SourceIP': source_ip, 'DestinationIP': dest_ip }) ec = { 'PcapExtractedFiles(val.FileMD5 === obj.FileMD5)': context } results.append( { 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': {'extractedFiles': files_base_names}, 'EntryContext': ec, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Pcap Extracted Files', [{'name': file_name} for file_name in files_base_names]) }) demisto.results(results) " 25213,"def _object_type( node: SuccessfulInferenceResult, context: InferenceContext | None = None ) -> Generator[InferenceResult | None, None, None]: astroid_manager = manager.AstroidManager() builtins = astroid_manager.builtins_module context = context or InferenceContext() for inferred in node.infer(context=context): if isinstance(inferred, scoped_nodes.ClassDef): if inferred.newstyle: metaclass = inferred.metaclass(context=context) if metaclass: yield metaclass continue yield builtins.getattr(""type"")[0] elif isinstance(inferred, (scoped_nodes.Lambda, bases.UnboundMethod)): yield _function_type(inferred, builtins) elif isinstance(inferred, scoped_nodes.Module): yield _build_proxy_class(""module"", builtins) elif isinstance(inferred, nodes.Unknown): raise InferenceError elif inferred is util.Uninferable: yield inferred elif isinstance(inferred, (bases.Proxy, nodes.Slice)): yield inferred._proxied else: # pragma: no cover # We don't handle other node types returned by infer currently raise AssertionError() ","def _object_type( node: SuccessfulInferenceResult, context: InferenceContext | None = None ) -> Generator[InferenceResult | None, None, None]: astroid_manager = manager.AstroidManager() builtins = astroid_manager.builtins_module context = context or InferenceContext() for inferred in node.infer(context=context): if isinstance(inferred, scoped_nodes.ClassDef): if inferred.newstyle: metaclass = inferred.metaclass(context=context) if metaclass: 
yield metaclass continue yield builtins.getattr(""type"")[0] elif isinstance(inferred, (scoped_nodes.Lambda, bases.UnboundMethod)): yield _function_type(inferred, builtins) elif isinstance(inferred, scoped_nodes.Module): yield _build_proxy_class(""module"", builtins) elif isinstance(inferred, nodes.Unknown): raise InferenceError elif inferred is util.Uninferable: yield inferred elif isinstance(inferred, (bases.Proxy, nodes.Slice)): yield inferred._proxied else: # pragma: no cover raise AssertionError(f""We don't handle {type(inferred)} currently"") " 5823,"def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, equal_var=True, alternative=""two-sided""): r"""""" T-test for means of two independent samples from descriptive statistics. This is a test for the null hypothesis that two independent samples have identical average (expected) values. Parameters ---------- mean1 : array_like The mean(s) of sample 1. std1 : array_like The unbiased estimate of the standard deviation(s) of sample 1 (i.e. `ddof=1`). nobs1 : array_like The number(s) of observations of sample 1. mean2 : array_like The mean(s) of sample 2. std2 : array_like The unbiased estimate of the standard deviations(s) of sample 2 (i.e. `ddof=1`). nobs2 : array_like The number(s) of observations of sample 2. equal_var : bool, optional If True (default), perform a standard independent 2 sample test that assumes equal population variances [1]_. If False, perform Welch's t-test, which does not assume equal population variance [2]_. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. The following options are available (default is 'two-sided'): * 'two-sided': the means of the distributions are unequal. * 'less': the mean of the first distribution is less than the mean of the second distribution. * 'greater': the mean of the first distribution is greater than the mean of the second distribution. .. versionadded:: 1.6.0 Returns ------- statistic : float or array The calculated t-statistics. pvalue : float or array The two-tailed p-value. See Also -------- scipy.stats.ttest_ind Notes ----- The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is the standard error. Therefore, the statistic will be positive when `mean1` is greater than `mean2` and negative when `mean1` is less than `mean2`. References ---------- .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test Examples -------- Suppose we have the summary data for two samples, as follows (with the Samples Variance being the unbiased estimate):: Sample Sample Size Mean Variance Sample 1 13 15.0 87.5 Sample 2 11 12.0 39.0 Apply the t-test to this data (with the assumption that the population variances are equal): >>> from scipy.stats import ttest_ind_from_stats >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13, ... mean2=12.0, std2=np.sqrt(39.0), nobs2=11) Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487) For comparison, here is the data from which those summary statistics were taken. 
With this data, we can compute the same result using `scipy.stats.ttest_ind`: >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26]) >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21]) >>> from scipy.stats import ttest_ind >>> ttest_ind(a, b) Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486) Suppose we instead have binary data and would like to apply a t-test to compare the proportion of 1s in two independent groups:: Number of Sample Sample Size ones Mean Variance Sample 1 150 30 0.2 0.16 Sample 2 200 45 0.225 0.174375 The sample mean :math:`\hat{p}` is the proportion of ones in the sample and the variance for a binary observation is estimated by :math:`\hat{p}(1-\hat{p})`. >>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150, ... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200) Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874) For comparison, we could compute the t statistic and p-value using arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above. >>> group1 = np.array([1]*30 + [0]*(150-30)) >>> group2 = np.array([1]*45 + [0]*(200-45)) >>> ttest_ind(group1, group2) Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258) """""" mean1 = np.asarray(mean1) std1 = np.asarray(std1) mean2 = np.asarray(mean2) std2 = np.asarray(std2) if equal_var: df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) else: df, denom = _unequal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative) return Ttest_indResult(*res) ","def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, equal_var=True, alternative=""two-sided""): r"""""" T-test for means of two independent samples from descriptive statistics. This is a test for the null hypothesis that two independent samples have identical average (expected) values. Parameters ---------- mean1 : array_like The mean(s) of sample 1. std1 : array_like The unbiased estimate of the standard deviation(s) of sample 1 (i.e. ``ddof=1``). nobs1 : array_like The number(s) of observations of sample 1. mean2 : array_like The mean(s) of sample 2. std2 : array_like The unbiased estimate of the standard deviations(s) of sample 2 (i.e. `ddof=1`). nobs2 : array_like The number(s) of observations of sample 2. equal_var : bool, optional If True (default), perform a standard independent 2 sample test that assumes equal population variances [1]_. If False, perform Welch's t-test, which does not assume equal population variance [2]_. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. The following options are available (default is 'two-sided'): * 'two-sided': the means of the distributions are unequal. * 'less': the mean of the first distribution is less than the mean of the second distribution. * 'greater': the mean of the first distribution is greater than the mean of the second distribution. .. versionadded:: 1.6.0 Returns ------- statistic : float or array The calculated t-statistics. pvalue : float or array The two-tailed p-value. See Also -------- scipy.stats.ttest_ind Notes ----- The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is the standard error. Therefore, the statistic will be positive when `mean1` is greater than `mean2` and negative when `mean1` is less than `mean2`. References ---------- .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test .. 
[2] https://en.wikipedia.org/wiki/Welch%27s_t-test Examples -------- Suppose we have the summary data for two samples, as follows (with the Samples Variance being the unbiased estimate):: Sample Sample Size Mean Variance Sample 1 13 15.0 87.5 Sample 2 11 12.0 39.0 Apply the t-test to this data (with the assumption that the population variances are equal): >>> from scipy.stats import ttest_ind_from_stats >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13, ... mean2=12.0, std2=np.sqrt(39.0), nobs2=11) Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487) For comparison, here is the data from which those summary statistics were taken. With this data, we can compute the same result using `scipy.stats.ttest_ind`: >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26]) >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21]) >>> from scipy.stats import ttest_ind >>> ttest_ind(a, b) Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486) Suppose we instead have binary data and would like to apply a t-test to compare the proportion of 1s in two independent groups:: Number of Sample Sample Size ones Mean Variance Sample 1 150 30 0.2 0.16 Sample 2 200 45 0.225 0.174375 The sample mean :math:`\hat{p}` is the proportion of ones in the sample and the variance for a binary observation is estimated by :math:`\hat{p}(1-\hat{p})`. >>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150, ... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200) Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874) For comparison, we could compute the t statistic and p-value using arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above. >>> group1 = np.array([1]*30 + [0]*(150-30)) >>> group2 = np.array([1]*45 + [0]*(200-45)) >>> ttest_ind(group1, group2) Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258) """""" mean1 = np.asarray(mean1) std1 = np.asarray(std1) mean2 = np.asarray(mean2) std2 = np.asarray(std2) if equal_var: df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) else: df, denom = _unequal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative) return Ttest_indResult(*res) " 45706,"def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. 
The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool, optional If True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). 
References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast:"") print(""------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (vil.shape[1], vil.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""FFT: %s"" % fft_method) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the ARI(p,1) model: %d"" % ar_order) if type(ar_window_radius) == int: print(""ARI(p,1) window radius: %d"" % ar_window_radius) else: print(""ARI(p,1) window radius: none"") print(""R(VIL) window radius: %d"" % r_vil_window_radius) if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, allow_nonfinite_values=True, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # compute time-lagged correlation coefficients for the cascade levels of # the advected and differenced input fields gamma = np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the 
resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() r_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" if rainrate is not None: r_f_prev = r_vil_a * vil[-1, :] + r_vil_b else: r_f_prev = vil[-1, :] extrap_kwargs[""return_displacement""] = True dp = None t_nowcast = 0 t_prev = 0.0 for t in range(len(timesteps)): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... "" % (t_nowcast + 1), end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() # iterate the ARI models for each cascade level for i in range(n_cascade_levels): vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i]) # recompose the cascade to obtain the forecast field vil_dec_dict = {} vil_dec_dict[""cascade_levels""] = vil_dec[:, -1, :] vil_dec_dict[""domain""] = ""spatial"" vil_dec_dict[""normalized""] = False vil_f = recomp_method(vil_dec_dict) vil_f[~mask] = np.nan if rainrate is not None: # convert VIL to rain rate r_f_new = r_vil_a * vil_f + r_vil_b else: r_f_new = vil_f if apply_rainrate_mask: r_f_new[rainrate_mask] = 0.0 r_f_new[r_f_new < 0.0] = 0.0 # advect the recomposed field to obtain the forecast for the current # time step (or subtimesteps if non-integer time steps are given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: r_f_ip = ( 1.0 - t_diff_prev_int ) * r_f_prev + t_diff_prev_int * r_f_new else: r_f_ip = r_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = dp r_f_ep, dp = extrapolator( r_f_ip, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) r_f.append(r_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if len(subtimesteps) == 0: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = dp _, dp = extrapolator( None, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) t_prev = t + 1 r_f_prev = r_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop if measure_time: return np.stack(r_f), init_time, mainloop_time else: return np.stack(r_f) ","def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast 
by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. 
measure_time: bool, optional If True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast:"") print(""------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (vil.shape[1], vil.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""FFT: %s"" % fft_method) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the ARI(p,1) model: %d"" % ar_order) if type(ar_window_radius) == int: print(""ARI(p,1) window radius: %d"" % ar_window_radius) else: print(""ARI(p,1) window radius: none"") print(""R(VIL) window radius: %d"" % r_vil_window_radius) if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, allow_nonfinite_values=True, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # 
compute time-lagged correlation coefficients for the cascade levels of # the advected and differenced input fields gamma = np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() r_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" if rainrate is not None: r_f_prev = r_vil_a * vil[-1, :] + r_vil_b else: r_f_prev = vil[-1, :] extrap_kwargs[""return_displacement""] = True dp = None t_nowcast = 0 t_prev = 0.0 for t, timestep in enumerate(timesteps): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... 
"" % (t_nowcast + 1), end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() # iterate the ARI models for each cascade level for i in range(n_cascade_levels): vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i]) # recompose the cascade to obtain the forecast field vil_dec_dict = {} vil_dec_dict[""cascade_levels""] = vil_dec[:, -1, :] vil_dec_dict[""domain""] = ""spatial"" vil_dec_dict[""normalized""] = False vil_f = recomp_method(vil_dec_dict) vil_f[~mask] = np.nan if rainrate is not None: # convert VIL to rain rate r_f_new = r_vil_a * vil_f + r_vil_b else: r_f_new = vil_f if apply_rainrate_mask: r_f_new[rainrate_mask] = 0.0 r_f_new[r_f_new < 0.0] = 0.0 # advect the recomposed field to obtain the forecast for the current # time step (or subtimesteps if non-integer time steps are given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: r_f_ip = ( 1.0 - t_diff_prev_int ) * r_f_prev + t_diff_prev_int * r_f_new else: r_f_ip = r_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = dp r_f_ep, dp = extrapolator( r_f_ip, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) r_f.append(r_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if len(subtimesteps) == 0: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = dp _, dp = extrapolator( None, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) t_prev = t + 1 r_f_prev = r_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop if measure_time: return np.stack(r_f), init_time, mainloop_time else: return np.stack(r_f) " 54060,"def dropout_node(edge_index: Tensor, edge_attr: OptTensor = None, p: float = 0.5, num_nodes: Optional[int] = None, training: bool = True) -> Tuple[Tensor, OptTensor]: r""""""Randomly drops nodes from the adjacency matrix :obj:`(edge_index, edge_attr)` with probability :obj:`p` using samples from a Bernoulli distribution. Args: edge_index (LongTensor): The edge indices. edge_attr (Tensor, optional): Edge weights or multi-dimensional edge features. (default: :obj:`None`) p (float, optional): Dropout probability. (default: :obj:`0.5`) num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) training (bool, optional): If set to :obj:`False`, this operation is a no-op. (default: :obj:`True`) Examples: >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], ... [1, 0, 2, 1, 3, 2]]) >>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6]) >>> dropout_node(edge_index, edge_attr) (tensor([[2, 3], [3, 2]]), tensor([5, 6])) """""" if p < 0. 
or p > 1.: raise ValueError(f'Dropout probability has to be between 0 and 1 ' f'(got {p}') if not training or p == 0.0: return edge_index, edge_attr num_nodes = maybe_num_nodes(edge_index, num_nodes) nodes = torch.arange(num_nodes, dtype=torch.long, device=edge_index.device) mask = torch.full_like(nodes, 1 - p, dtype=torch.float32) mask = torch.bernoulli(mask).to(torch.bool) subset = nodes[mask] return subgraph(subset, edge_index, edge_attr, num_nodes=num_nodes) ","def dropout_node(edge_index: Tensor, edge_attr: OptTensor = None, p: float = 0.5, num_nodes: Optional[int] = None, training: bool = True) -> Tuple[Tensor, OptTensor]: r""""""Randomly drops nodes from the adjacency matrix :obj:`(edge_index, edge_attr)` with probability :obj:`p` using samples from a Bernoulli distribution. Args: edge_index (LongTensor): The edge indices. edge_attr (Tensor, optional): Edge weights or multi-dimensional edge features. (default: :obj:`None`) p (float, optional): Dropout probability. (default: :obj:`0.5`) num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) training (bool, optional): If set to :obj:`False`, this operation is a no-op. (default: :obj:`True`) Examples: >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], ... [1, 0, 2, 1, 3, 2]]) >>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6]) >>> dropout_node(edge_index, edge_attr) (tensor([[2, 3], [3, 2]]), tensor([5, 6])) """""" if p < 0. or p > 1.: raise ValueError(f'Dropout probability has to be between 0 and 1 ' f'(got {p}') if not training or p == 0.0: return edge_index, edge_attr num_nodes = maybe_num_nodes(edge_index, num_nodes) prob = torch.rand(num_nodes, device=edge_index.device) mask = prob > p subset = nodes[mask] return subgraph(subset, edge_index, edge_attr, num_nodes=num_nodes) " 48326,"def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict( cluster_name=dict(type='str', required=True), evc_mode=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'present', ['cluster_name', 'evc_mode']] ] ) state = module.params['state'] cluster_name = module.params['cluster_name'] evc_mode = module.params['evc_mode'] content = connect_to_api(module, False) results = dict(changed=False, result=dict()) cluster = find_cluster_by_name(content, cluster_name) evcm = cluster.EvcManager() evc_state = evcm.evcState current_evc_mode = evc_state.currentEVCModeKey supported_evc_modes = evc_state.supportedEVCMode if state == 'present' and current_evc_mode != evc_mode: try: if not module.check_mode: evc_task = evcm.ConfigureEvcMode_Task(evc_mode) wait_for_task(evc_task) results['changed'] = True results['result'] = ""EVC Mode for '%s' has been enabled."" % (evc_mode) except TaskError as invalid_argument: module.fail_json(msg=""Failed to update EVC mode: %s"" % to_native(invalid_argument)) elif state == 'present' and current_evc_mode == evc_mode: results['changed'] = False results['result'] = ""EVC Mode for '%s' is already enabled."" % (evc_mode) elif state == 'absent' and not current_evc_mode: results['changed'] = False results['result'] = ""EVC Mode is already disabled."" elif state == 'absent': try: if not module.check_mode: evc_disable_task = evcm.DisableEvcMode_Task() wait_for_task(evc_disable_task) results['changed'] = True results['result'] = ""EVC Mode has been disabled."" except TaskError as invalid_argument: 
module.fail_json(msg=""Failed to disable EVC mode: %s"" % to_native(invalid_argument)) module.exit_json(**results) ","def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict( cluster_name=dict(type='str', required=True), evc_mode=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'present', ['cluster_name', 'evc_mode']] ] ) state = module.params['state'] cluster_name = module.params['cluster_name'] evc_mode = module.params['evc_mode'] content = connect_to_api(module, False) results = dict(changed=False, result=dict()) cluster_obj = find_cluster_by_name(content, cluster_name) if not cluster_obj: module.fail_json(msg=""Unable to find the cluster %(cluster_name)s"" % module.params) evcm = cluster.EvcManager() evc_state = evcm.evcState current_evc_mode = evc_state.currentEVCModeKey supported_evc_modes = evc_state.supportedEVCMode if state == 'present' and current_evc_mode != evc_mode: try: if not module.check_mode: evc_task = evcm.ConfigureEvcMode_Task(evc_mode) wait_for_task(evc_task) results['changed'] = True results['result'] = ""EVC Mode for '%s' has been enabled."" % (evc_mode) except TaskError as invalid_argument: module.fail_json(msg=""Failed to update EVC mode: %s"" % to_native(invalid_argument)) elif state == 'present' and current_evc_mode == evc_mode: results['changed'] = False results['result'] = ""EVC Mode for '%s' is already enabled."" % (evc_mode) elif state == 'absent' and not current_evc_mode: results['changed'] = False results['result'] = ""EVC Mode is already disabled."" elif state == 'absent': try: if not module.check_mode: evc_disable_task = evcm.DisableEvcMode_Task() wait_for_task(evc_disable_task) results['changed'] = True results['result'] = ""EVC Mode has been disabled."" except TaskError as invalid_argument: module.fail_json(msg=""Failed to disable EVC mode: %s"" % to_native(invalid_argument)) module.exit_json(**results) " 3939,"def parse_deps_timeout(d_str): if d_str == ""infinity"": return -1 elif d_str.isnumeric(): seconds = int(d_str) else: seconds = pytimeparse.parse(d_str) if seconds is None: raise argparse.ArgumentTypeError( f""'{d_str}' is not a valid duration. Must be either number of seconds or pytimeparse-parsable string."" ) if seconds < 0: raise argparse.ArgumentTypeError(f""'{d_str}' must not be negative"") return seconds ","def parse_deps_timeout(d_str: str) -> int: if d_str == ""infinity"": return -1 elif d_str.isnumeric(): seconds = int(d_str) else: seconds = pytimeparse.parse(d_str) if seconds is None: raise argparse.ArgumentTypeError( f""'{d_str}' is not a valid duration. Must be either number of seconds or pytimeparse-parsable string."" ) if seconds < 0: raise argparse.ArgumentTypeError(f""'{d_str}' must not be negative"") return seconds " 41525,"def multiply(s, coef, unit=None, var=None, inplace=False): """"""Multiply s[var] by the float 'coef' Parameters ---------- s: Spectrum object The spectrum to multiply. coef: float Coefficient of the multiplication. unit: str. unit for ``coef``. If ``None``, ``coef`` is considered to be adimensioned. Else, the spectrum `~radis.spectrum.spectrum.Spectrum.units` is multiplied. var: str, or ``None`` 'radiance', 'transmittance', ... If ``None``, get the unique spectral quantity of ``s`` or raises an error if there is any ambiguity inplace: bool if ``True``, modifies ``s`` directly. Else, returns a copy. 
Default ``False`` Returns ------- s : Spectrum Spectrum object where intensity of s['var'] is multiplied by coef If ``inplace=True``, ``s`` has been modified directly. """""" # Check input var = _get_unique_var(s, var, inplace) # Case where a is dimensioned if isinstance(coef, u.quantity.Quantity): if unit is not None: raise ValueError( ""Cannot use unit= when giving a dimensioned array ({0})"".format( coef.unit ) ) unit = coef.unit coef = coef.value if not inplace: s = s.copy(quantity=var) # if name is not None: # s.name = name # Multiply inplace ( @dev: we have copied already if needed ) w, I = s.get(var, wunit=s.get_waveunit(), copy=False) I *= coef # @dev: updates the Spectrum directly because of copy=False # Convert Spectrum unit if unit is not None: Iunit = s.units[var] s.units[var] = (u.Unit(Iunit) * u.Unit(unit)).to_string() return s ","def multiply(s, coef, unit=None, var=None, inplace=False): """"""Multiply s[var] by the float 'coef' Parameters ---------- s: Spectrum object The spectrum to multiply. coef: float Coefficient of the multiplication. unit: str, or `~astropy.units.core.Unit` unit for ``coef``. If ``None``, ``coef`` is considered to be adimensioned. Else, the spectrum `~radis.spectrum.spectrum.Spectrum.units` is multiplied. var: str, or ``None`` 'radiance', 'transmittance', ... If ``None``, get the unique spectral quantity of ``s`` or raises an error if there is any ambiguity inplace: bool if ``True``, modifies ``s`` directly. Else, returns a copy. Default ``False`` Returns ------- s : Spectrum Spectrum object where intensity of s['var'] is multiplied by coef If ``inplace=True``, ``s`` has been modified directly. """""" # Check input var = _get_unique_var(s, var, inplace) # Case where a is dimensioned if isinstance(coef, u.quantity.Quantity): if unit is not None: raise ValueError( ""Cannot use unit= when giving a dimensioned array ({0})"".format( coef.unit ) ) unit = coef.unit coef = coef.value if not inplace: s = s.copy(quantity=var) # if name is not None: # s.name = name # Multiply inplace ( @dev: we have copied already if needed ) w, I = s.get(var, wunit=s.get_waveunit(), copy=False) I *= coef # @dev: updates the Spectrum directly because of copy=False # Convert Spectrum unit if unit is not None: Iunit = s.units[var] s.units[var] = (u.Unit(Iunit) * u.Unit(unit)).to_string() return s " 43844,"def test_to_jax(): """"""Test the to_jax method"""""" dev = qml.device(""default.mixed"", wires=2) @qml.qnode(dev, interface=""autograd"") def circuit(weights): qml.RX(weights[0], wires=0) qml.RZ(weights[1], wires=1) return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) circuit.to_jax() weights = jnp.array([0.1, 0.2]) val = circuit(weights) assert ""DeviceArray"" in val.__repr__() ","def test_to_jax(): """"""Test the to_jax method"""""" dev = qml.device(""default.mixed"", wires=2) @qml.qnode(dev, interface=""autograd"") def circuit(weights): qml.RX(weights[0], wires=0) qml.RZ(weights[1], wires=1) return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) """"""Test class for IsingXX and IsingZZ"""""" circuit.to_jax() weights = jnp.array([0.1, 0.2]) val = circuit(weights) assert ""DeviceArray"" in val.__repr__() " 29847,"def _pack( directory: str, *, compression: Optional[str] = None, output: Optional[str] ) -> None: output_file = None output_dir = None # Output may be: # (1) a directory path to output snaps to # (2) an explicit file path to output snap to # (3) unspecified (None), output to current directory (project directory) if output: output_path = pathlib.Path(output) output_parent = 
output_path.parent if output_path.is_dir(): output_dir = str(output_path) elif output_parent and output_parent != pathlib.Path("".""): output_dir = str(output_parent) output_file = output_path.name else: output_file = output snap_path = file_utils.get_host_tool_path(command_name=""snap"", package_name=""snapd"") command: List[Union[str, pathlib.Path]] = [snap_path, ""pack""] # When None, just use snap pack's default settings. if compression is not None: if compression != ""xz"": echo.warning( f""EXPERIMENTAL: Setting the squash FS compression to {compression!r}."" ) command.extend([""--compression"", compression]) if output_file: command.extend([""--filename"", output_file]) command.append(directory) if output_dir: command.append(output_dir) logger.debug(f""Running pack command: {command}"") snap_filename = _run_pack(command) echo.info(f""Snapped {snap_filename}"") ","def _pack( directory: str, *, compression: Optional[str] = None, output: Optional[str] ) -> None: output_file = None output_dir = None # Output may be: # (1) a directory path to output snaps to # (2) an explicit file path to output snap to # (3) unspecified (None), output to current directory (project directory) if output: output_path = pathlib.Path(output) output_parent = output_path.parent if output_path.is_dir(): output_dir = str(output_path) elif output_parent and output_parent != pathlib.Path("".""): output_dir = str(output_parent) output_file = output_path.name else: output_file = output snap_path = file_utils.get_host_tool_path(command_name=""snap"", package_name=""snapd"") command: List[Union[str, pathlib.Path]] = [snap_path, ""pack""] # When None, just use snap pack's default settings. if compression is not None: if compression != ""xz"": echo.warning( f""EXPERIMENTAL: Setting the squash FS compression to {compression!r}."" ) command.extend([""--compression"", compression]) if output_file: command.extend([""--filename"", output_file]) command.append(directory) if output_dir is not None: command.append(output_dir) logger.debug(f""Running pack command: {command}"") snap_filename = _run_pack(command) echo.info(f""Snapped {snap_filename}"") " 11811,"def logical_xor(image1, image2): """"""Logical XOR between two images. At least one of the images must be ""1"" mode. .. code-block:: python out = ((bool(image1) != bool(image2)) % MAX) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_xor(image2.im)) ","def logical_xor(image1, image2): """"""Logical XOR between two images. At least one of the images must have mode. .. 
code-block:: python out = ((bool(image1) != bool(image2)) % MAX) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_xor(image2.im)) " 57713,"def get_test_pack_name(test_id: str, id_set: json) -> str: """"""Returns a the pack name containing the given test Args: test_id (str): The test id to be searched id_set (json): Json representing the content id set Returns: str: the pack name containing the given test """""" id_set_test_playbooks = id_set['TestPlaybooks'] for id_set_test_playbook_entry in id_set_test_playbooks: id_set_test_playbook_id = list(id_set_test_playbook_entry.keys())[0] if id_set_test_playbook_id == test_id: id_set_test_playbook_data = list(id_set_test_playbook_entry.values())[0] id_set_test_playbook_pack_name = id_set_test_playbook_data.get('pack') return id_set_test_playbook_pack_name ","def get_test_pack_name(test_id: str, id_set: json) -> str: """""" Returns the pack name containing the given test Args: test_id (str): The test id to be searched id_set (json): Json representing the content id set Returns: str: the pack name containing the given test """""" id_set_test_playbooks = id_set['TestPlaybooks'] for id_set_test_playbook_entry in id_set_test_playbooks: id_set_test_playbook_id = list(id_set_test_playbook_entry.keys())[0] if id_set_test_playbook_id == test_id: id_set_test_playbook_data = list(id_set_test_playbook_entry.values())[0] id_set_test_playbook_pack_name = id_set_test_playbook_data.get('pack') return id_set_test_playbook_pack_name " 37546,"def level_2_pass_manager(pass_manager_config: PassManagerConfig) -> PassManager: """"""Level 2 pass manager: medium optimization by initial layout selection and gate cancellation using commutativity rules. This pass manager applies the user-given initial layout. If none is given, a search for a perfect layout (i.e. one that satisfies all 2-qubit interactions) is conducted. If no such layout is found, qubits are laid out on the most densely connected subset which also exhibits the best gate fidelities. The pass manager then transforms the circuit to match the coupling constraints. It is then unrolled to the basis, and any flipped cx directions are fixed. Finally, optimizations in the form of commutative gate cancellation and redundant reset removal are performed. Note: In simulators where ``coupling_map=None``, only the unrolling and optimization stages are done. Args: pass_manager_config: configuration of the pass manager. Returns: a level 2 pass manager. Raises: TranspilerError: if the passmanager config is invalid. """""" basis_gates = pass_manager_config.basis_gates inst_map = pass_manager_config.inst_map coupling_map = pass_manager_config.coupling_map initial_layout = pass_manager_config.initial_layout layout_method = pass_manager_config.layout_method or ""dense"" routing_method = pass_manager_config.routing_method or ""stochastic"" translation_method = pass_manager_config.translation_method or ""translator"" scheduling_method = pass_manager_config.scheduling_method instruction_durations = pass_manager_config.instruction_durations seed_transpiler = pass_manager_config.seed_transpiler backend_properties = pass_manager_config.backend_properties approximation_degree = pass_manager_config.approximation_degree unitary_synthesis_method = pass_manager_config.unitary_synthesis_method timing_constraints = pass_manager_config.timing_constraints or TimingConstraints() # 1. 
Search for a perfect layout, or choose a dense layout, if no layout given _given_layout = SetLayout(initial_layout) def _choose_layout_condition(property_set): # layout hasn't been set yet return not property_set[""layout""] # 1a. If layout_method is not set, first try a trivial layout _choose_layout_0 = ( [] if pass_manager_config.layout_method else [ TrivialLayout(coupling_map), Layout2qDistance(coupling_map, property_name=""trivial_layout_score""), ] ) # 1b. If a trivial layout wasn't perfect (ie no swaps are needed) then try using # CSP layout to find a perfect layout _choose_layout_1 = ( [] if pass_manager_config.layout_method else CSPLayout(coupling_map, call_limit=1000, time_limit=10, seed=seed_transpiler) ) def _trivial_not_perfect(property_set): # Verify that a trivial layout is perfect. If trivial_layout_score > 0 # the layout is not perfect. The layout is unconditionally set by trivial # layout so we need to clear it before contuing. if property_set[""trivial_layout_score""] is not None: if property_set[""trivial_layout_score""] != 0: property_set[""layout""]._wrapped = None return True return False def _csp_not_found_match(property_set): # If a layout hasn't been set by the time we run csp we need to run layout if property_set[""layout""] is None: return True # if CSP layout stopped for any reason other than solution found we need # to run layout since CSP didn't converge. if ( property_set[""CSPLayout_stop_reason""] is not None and property_set[""CSPLayout_stop_reason""] != ""solution found"" ): return True return False # 1c. if CSP layout doesn't converge on a solution use layout_method (dense) to get a layout if layout_method == ""trivial"": _choose_layout_2 = TrivialLayout(coupling_map) elif layout_method == ""dense"": _choose_layout_2 = DenseLayout(coupling_map, backend_properties) elif layout_method == ""noise_adaptive"": _choose_layout_2 = NoiseAdaptiveLayout(backend_properties) elif layout_method == ""sabre"": _choose_layout_2 = SabreLayout(coupling_map, max_iterations=2, seed=seed_transpiler) else: raise TranspilerError(""Invalid layout method %s."" % layout_method) # 2. Extend dag/layout with ancillas using the full coupling map _embed = [FullAncillaAllocation(coupling_map), EnlargeWithAncilla(), ApplyLayout()] # 3. Unroll to 1q or 2q gates _unroll3q = [ # Use unitary synthesis for basis aware decomposition of UnitaryGates UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, min_qubits=3, ), Unroll3qOrMore(), ] # 4. Swap to fit the coupling map _swap_check = CheckMap(coupling_map) def _swap_condition(property_set): return not property_set[""is_swap_mapped""] _swap = [BarrierBeforeFinalMeasurements()] if routing_method == ""basic"": _swap += [BasicSwap(coupling_map)] elif routing_method == ""stochastic"": _swap += [StochasticSwap(coupling_map, trials=20, seed=seed_transpiler)] elif routing_method == ""lookahead"": _swap += [LookaheadSwap(coupling_map, search_depth=5, search_width=5)] elif routing_method == ""sabre"": _swap += [SabreSwap(coupling_map, heuristic=""decay"", seed=seed_transpiler)] elif routing_method == ""none"": _swap += [ Error( msg=( ""No routing method selected, but circuit is not routed to device. "" ""CheckMap Error: {check_map_msg}"" ), action=""raise"", ) ] else: raise TranspilerError(""Invalid routing method %s."" % routing_method) # 5. 
Unroll to the basis if translation_method == ""unroller"": _unroll = [Unroller(basis_gates)] elif translation_method == ""translator"": from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel _unroll = [ # Use unitary synthesis for basis aware decomposition of UnitaryGates before # custom unrolling UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, ), UnrollCustomDefinitions(sel, basis_gates), BasisTranslator(sel, basis_gates), ] elif translation_method == ""synthesis"": _unroll = [ # Use unitary synthesis for basis aware decomposition of UnitaryGates before # collection UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, min_qubits=3, ), Unroll3qOrMore(), Collect2qBlocks(), ConsolidateBlocks(basis_gates=basis_gates), UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, ), ] else: raise TranspilerError(""Invalid translation method %s."" % translation_method) # 6. Fix any bad CX directions _direction_check = [CheckGateDirection(coupling_map)] def _direction_condition(property_set): return not property_set[""is_direction_mapped""] _direction = [GateDirection(coupling_map)] # 7. Remove zero-state reset _reset = RemoveResetInZeroState() # 8. 1q rotation merge and commutative cancellation iteratively until no more change in depth _depth_check = [Depth(), FixedPoint(""depth"")] def _opt_control(property_set): return not property_set[""depth_fixed_point""] _opt = [ Optimize1qGatesDecomposition(basis_gates), CommutativeCancellation(basis_gates=basis_gates), ] # 9. Unify all durations (either SI, or convert to dt if known) # Schedule the circuit only when scheduling_method is supplied _time_unit_setup = [ContainsInstruction(""delay"")] _time_unit_conversion = [TimeUnitConversion(instruction_durations)] def _contains_delay(property_set): return property_set[""contains_delay""] _scheduling = [] if scheduling_method: _scheduling += _time_unit_conversion if scheduling_method in {""alap"", ""as_late_as_possible""}: _scheduling += [ALAPSchedule(instruction_durations)] elif scheduling_method in {""asap"", ""as_soon_as_possible""}: _scheduling += [ASAPSchedule(instruction_durations)] else: raise TranspilerError(""Invalid scheduling method %s."" % scheduling_method) # 10. Call measure alignment. Should come after scheduling. 
if ( timing_constraints.granularity != 1 or timing_constraints.min_length != 1 or timing_constraints.acquire_alignment != 1 ): _alignments = [ ValidatePulseGates( granularity=timing_constraints.granularity, min_length=timing_constraints.min_length ), AlignMeasures(alignment=timing_constraints.acquire_alignment), ] else: _alignments = [] # Build pass manager pm2 = PassManager() if coupling_map or initial_layout: pm2.append(_given_layout) pm2.append(_choose_layout_0, condition=_choose_layout_condition) pm2.append(_choose_layout_1, condition=_trivial_not_perfect) pm2.append(_choose_layout_2, condition=_csp_not_found_match) pm2.append(_embed) pm2.append(_unroll3q) pm2.append(_swap_check) pm2.append(_swap, condition=_swap_condition) pm2.append(_unroll) if coupling_map and not coupling_map.is_symmetric: pm2.append(_direction_check) pm2.append(_direction, condition=_direction_condition) pm2.append(_reset) pm2.append(_depth_check + _opt + _unroll, do_while=_opt_control) if inst_map and inst_map.has_custom_gate(): pm2.append(PulseGates(inst_map=inst_map)) if scheduling_method: pm2.append(_scheduling) elif instruction_durations: pm2.append(_time_unit_setup) pm2.append(_time_unit_conversion, condition=_contains_delay) pm2.append(_alignments) return pm2 ","def level_2_pass_manager(pass_manager_config: PassManagerConfig) -> PassManager: """"""Level 2 pass manager: medium optimization by initial layout selection and gate cancellation using commutativity rules. This pass manager applies the user-given initial layout. If none is given, a search for a perfect layout (i.e. one that satisfies all 2-qubit interactions) is conducted. If no such layout is found, qubits are laid out on the most densely connected subset which also exhibits the best gate fidelities. The pass manager then transforms the circuit to match the coupling constraints. It is then unrolled to the basis, and any flipped cx directions are fixed. Finally, optimizations in the form of commutative gate cancellation and redundant reset removal are performed. Note: In simulators where ``coupling_map=None``, only the unrolling and optimization stages are done. Args: pass_manager_config: configuration of the pass manager. Returns: a level 2 pass manager. Raises: TranspilerError: if the passmanager config is invalid. """""" basis_gates = pass_manager_config.basis_gates inst_map = pass_manager_config.inst_map coupling_map = pass_manager_config.coupling_map initial_layout = pass_manager_config.initial_layout layout_method = pass_manager_config.layout_method or ""dense"" routing_method = pass_manager_config.routing_method or ""stochastic"" translation_method = pass_manager_config.translation_method or ""translator"" scheduling_method = pass_manager_config.scheduling_method instruction_durations = pass_manager_config.instruction_durations seed_transpiler = pass_manager_config.seed_transpiler backend_properties = pass_manager_config.backend_properties approximation_degree = pass_manager_config.approximation_degree unitary_synthesis_method = pass_manager_config.unitary_synthesis_method timing_constraints = pass_manager_config.timing_constraints or TimingConstraints() # 1. Search for a perfect layout, or choose a dense layout, if no layout given _given_layout = SetLayout(initial_layout) def _choose_layout_condition(property_set): # layout hasn't been set yet return not property_set[""layout""] # 1a. 
If layout_method is not set, first try a trivial layout _choose_layout_0 = ( [] if pass_manager_config.layout_method else [ TrivialLayout(coupling_map), Layout2qDistance(coupling_map, property_name=""trivial_layout_score""), ] ) # 1b. If a trivial layout wasn't perfect (ie no swaps are needed) then try using # CSP layout to find a perfect layout _choose_layout_1 = ( [] if pass_manager_config.layout_method else CSPLayout(coupling_map, call_limit=1000, time_limit=10, seed=seed_transpiler) ) def _trivial_not_perfect(property_set): # Verify that a trivial layout is perfect. If trivial_layout_score > 0 # the layout is not perfect. The layout is unconditionally set by trivial # layout so we need to clear it before contuing. if property_set[""trivial_layout_score""] is not None: if property_set[""trivial_layout_score""] != 0: property_set[""layout""]._wrapped = None return True return False def _csp_not_found_match(property_set): # If a layout hasn't been set by the time we run csp we need to run layout if property_set[""layout""] is None: return True # if CSP layout stopped for any reason other than solution found we need # to run layout since CSP didn't converge. if ( property_set[""CSPLayout_stop_reason""] is not None and property_set[""CSPLayout_stop_reason""] != ""solution found"" ): return True return False # 1c. if CSP layout doesn't converge on a solution use layout_method (dense) to get a layout if layout_method == ""trivial"": _choose_layout_2 = TrivialLayout(coupling_map) elif layout_method == ""dense"": _choose_layout_2 = DenseLayout(coupling_map, backend_properties) elif layout_method == ""noise_adaptive"": _choose_layout_2 = NoiseAdaptiveLayout(backend_properties) elif layout_method == ""sabre"": _choose_layout_2 = SabreLayout(coupling_map, max_iterations=2, seed=seed_transpiler) else: raise TranspilerError(""Invalid layout method %s."" % layout_method) # 2. Extend dag/layout with ancillas using the full coupling map _embed = [FullAncillaAllocation(coupling_map), EnlargeWithAncilla(), ApplyLayout()] # 3. Unroll to 1q or 2q gates _unroll3q = [ # Use unitary synthesis for basis aware decomposition of UnitaryGates UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, min_qubits=3, ), Unroll3qOrMore(), ] # 4. Swap to fit the coupling map _swap_check = CheckMap(coupling_map) def _swap_condition(property_set): return not property_set[""is_swap_mapped""] _swap = [BarrierBeforeFinalMeasurements()] if routing_method == ""basic"": _swap += [BasicSwap(coupling_map)] elif routing_method == ""stochastic"": _swap += [StochasticSwap(coupling_map, trials=20, seed=seed_transpiler)] elif routing_method == ""lookahead"": _swap += [LookaheadSwap(coupling_map, search_depth=5, search_width=5)] elif routing_method == ""sabre"": _swap += [SabreSwap(coupling_map, heuristic=""decay"", seed=seed_transpiler)] elif routing_method == ""none"": _swap += [ Error( msg=( ""No routing method selected, but circuit is not routed to device. "" f""CheckMap Error: {check_map_msg}"" ), action=""raise"", ) ] else: raise TranspilerError(""Invalid routing method %s."" % routing_method) # 5. 
Unroll to the basis if translation_method == ""unroller"": _unroll = [Unroller(basis_gates)] elif translation_method == ""translator"": from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel _unroll = [ # Use unitary synthesis for basis aware decomposition of UnitaryGates before # custom unrolling UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, ), UnrollCustomDefinitions(sel, basis_gates), BasisTranslator(sel, basis_gates), ] elif translation_method == ""synthesis"": _unroll = [ # Use unitary synthesis for basis aware decomposition of UnitaryGates before # collection UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, min_qubits=3, ), Unroll3qOrMore(), Collect2qBlocks(), ConsolidateBlocks(basis_gates=basis_gates), UnitarySynthesis( basis_gates, approximation_degree=approximation_degree, coupling_map=coupling_map, backend_props=backend_properties, method=unitary_synthesis_method, ), ] else: raise TranspilerError(""Invalid translation method %s."" % translation_method) # 6. Fix any bad CX directions _direction_check = [CheckGateDirection(coupling_map)] def _direction_condition(property_set): return not property_set[""is_direction_mapped""] _direction = [GateDirection(coupling_map)] # 7. Remove zero-state reset _reset = RemoveResetInZeroState() # 8. 1q rotation merge and commutative cancellation iteratively until no more change in depth _depth_check = [Depth(), FixedPoint(""depth"")] def _opt_control(property_set): return not property_set[""depth_fixed_point""] _opt = [ Optimize1qGatesDecomposition(basis_gates), CommutativeCancellation(basis_gates=basis_gates), ] # 9. Unify all durations (either SI, or convert to dt if known) # Schedule the circuit only when scheduling_method is supplied _time_unit_setup = [ContainsInstruction(""delay"")] _time_unit_conversion = [TimeUnitConversion(instruction_durations)] def _contains_delay(property_set): return property_set[""contains_delay""] _scheduling = [] if scheduling_method: _scheduling += _time_unit_conversion if scheduling_method in {""alap"", ""as_late_as_possible""}: _scheduling += [ALAPSchedule(instruction_durations)] elif scheduling_method in {""asap"", ""as_soon_as_possible""}: _scheduling += [ASAPSchedule(instruction_durations)] else: raise TranspilerError(""Invalid scheduling method %s."" % scheduling_method) # 10. Call measure alignment. Should come after scheduling. 
if ( timing_constraints.granularity != 1 or timing_constraints.min_length != 1 or timing_constraints.acquire_alignment != 1 ): _alignments = [ ValidatePulseGates( granularity=timing_constraints.granularity, min_length=timing_constraints.min_length ), AlignMeasures(alignment=timing_constraints.acquire_alignment), ] else: _alignments = [] # Build pass manager pm2 = PassManager() if coupling_map or initial_layout: pm2.append(_given_layout) pm2.append(_choose_layout_0, condition=_choose_layout_condition) pm2.append(_choose_layout_1, condition=_trivial_not_perfect) pm2.append(_choose_layout_2, condition=_csp_not_found_match) pm2.append(_embed) pm2.append(_unroll3q) pm2.append(_swap_check) pm2.append(_swap, condition=_swap_condition) pm2.append(_unroll) if coupling_map and not coupling_map.is_symmetric: pm2.append(_direction_check) pm2.append(_direction, condition=_direction_condition) pm2.append(_reset) pm2.append(_depth_check + _opt + _unroll, do_while=_opt_control) if inst_map and inst_map.has_custom_gate(): pm2.append(PulseGates(inst_map=inst_map)) if scheduling_method: pm2.append(_scheduling) elif instruction_durations: pm2.append(_time_unit_setup) pm2.append(_time_unit_conversion, condition=_contains_delay) pm2.append(_alignments) return pm2 " 12898,"def patch_pagination_args(field: DjangoConnectionField): """"""Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. """""" field.args[""first""].description = ""Returns the first n elements from the list."" field.args[""last""].description = ""Returns the last n elements from the list."" field.args[ ""before"" ].description = ( ""Returns the elements in the list that come before the specified cursor."" ) field.args[ ""after"" ].description = ( ""Returns the elements in the list that come after the specified cursor."" ) ","def patch_pagination_args(field: DjangoConnectionField): """"""Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. """""" field.args[""first""].description = ""Return the first n elements from the list."" field.args[""last""].description = ""Returns the last n elements from the list."" field.args[ ""before"" ].description = ( ""Returns the elements in the list that come before the specified cursor."" ) field.args[ ""after"" ].description = ( ""Returns the elements in the list that come after the specified cursor."" ) " 8057,"def fromhdf5(source, where=None, name=None, condition=None, condvars=None, start=None, stop=None, step=None): """""" Provides access to an HDF5 table. E.g.:: >>> import petl as etl >>> >>> # set up a new hdf5 table to demonstrate with >>> class FooBar(tables.IsDescription): # doctest: +SKIP ... foo = tables.Int32Col(pos=0) # doctest: +SKIP ... bar = tables.StringCol(6, pos=2) # doctest: +SKIP >>> # >>> def setup_hdfs5_table(): ... import tables ... h5file = tables.open_file('example.h5', mode='w', ... title='Example file') ... h5file.create_group('/', 'testgroup', 'Test Group') ... h5table = h5file.create_table('/testgroup', 'testtable', FooBar, ... 'Test Table') ... # load some data into the table ... table1 = (('foo', 'bar'), ... (1, b'asdfgh'), ... (2, b'qwerty'), ... (3, b'zxcvbn')) ... for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... 
h5table.row.append() ... h5file.flush() ... h5file.close() >>> >>> setup_hdfs5_table() # doctest: +SKIP >>> >>> # now demonstrate use of fromhdf5 >>> table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable') # doctest: +SKIP >>> table1 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'asdfgh' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'zxcvbn' | +-----+-----------+ >>> # alternatively just specify path to table node ... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable') # doctest: +SKIP >>> # ...or use an existing tables.File object ... h5file = tables.open_file('example.h5') # doctest: +SKIP >>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable') # doctest: +SKIP >>> # ...or use an existing tables.Table object ... h5tbl = h5file.get_node('/testgroup/testtable') # doctest: +SKIP >>> table1 = etl.fromhdf5(h5tbl) # doctest: +SKIP >>> # use a condition to filter data ... table2 = etl.fromhdf5(h5tbl, condition='foo < 3') # doctest: +SKIP >>> table2 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'asdfgh' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ >>> h5file.close() # doctest: +SKIP """""" return HDF5View(source, where=where, name=name, condition=condition, condvars=condvars, start=start, stop=stop, step=step) ","def fromhdf5(source, where=None, name=None, condition=None, condvars=None, start=None, stop=None, step=None): """""" Provides access to an HDF5 table. E.g.:: >>> import petl as etl >>> >>> # set up a new hdf5 table to demonstrate with >>> class FooBar(tables.IsDescription): # doctest: +SKIP ... foo = tables.Int32Col(pos=0) # doctest: +SKIP ... bar = tables.StringCol(6, pos=2) # doctest: +SKIP >>> # >>> def setup_hdf5_table(): ... import tables ... h5file = tables.open_file('example.h5', mode='w', ... title='Example file') ... h5file.create_group('/', 'testgroup', 'Test Group') ... h5table = h5file.create_table('/testgroup', 'testtable', FooBar, ... 'Test Table') ... # load some data into the table ... table1 = (('foo', 'bar'), ... (1, b'asdfgh'), ... (2, b'qwerty'), ... (3, b'zxcvbn')) ... for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... h5file.flush() ... h5file.close() >>> >>> setup_hdfs5_table() # doctest: +SKIP >>> >>> # now demonstrate use of fromhdf5 >>> table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable') # doctest: +SKIP >>> table1 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'asdfgh' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'zxcvbn' | +-----+-----------+ >>> # alternatively just specify path to table node ... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable') # doctest: +SKIP >>> # ...or use an existing tables.File object ... h5file = tables.open_file('example.h5') # doctest: +SKIP >>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable') # doctest: +SKIP >>> # ...or use an existing tables.Table object ... h5tbl = h5file.get_node('/testgroup/testtable') # doctest: +SKIP >>> table1 = etl.fromhdf5(h5tbl) # doctest: +SKIP >>> # use a condition to filter data ... 
table2 = etl.fromhdf5(h5tbl, condition='foo < 3') # doctest: +SKIP >>> table2 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'asdfgh' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ >>> h5file.close() # doctest: +SKIP """""" return HDF5View(source, where=where, name=name, condition=condition, condvars=condvars, start=start, stop=stop, step=step) " 43080,"def to_program(prog): """"""Convert a Blackbird or an XIR program to a Strawberry Fields program. Args: prog (blackbird.BlackbirdProgram, xir.Program): the input program object Returns: Program: corresponding Strawberry Fields program """""" if isinstance(prog, blackbird.BlackbirdProgram): if not prog.modes: # we can't return an empty program, since we don't know how many modes # to initialize the Program object with. raise ValueError(""Blackbird program contains no quantum operations!"") if prog.programtype[""name""] == ""tdm"": return from_blackbird_to_tdm(prog) return from_blackbird(prog) if isinstance(prog, xir.Program): if prog.options.get(""type"") == ""tdm"": return from_xir_to_tdm(prog) return from_xir(prog) raise TypeError(f""Cannot convert '{prog.__class__}' to Strawberry Fields Program"") ","def to_program(prog): """"""Convert a Blackbird or an XIR program to a Strawberry Fields program. Args: prog (blackbird.BlackbirdProgram, xir.Program): the input program object Returns: Program: corresponding Strawberry Fields program """""" if isinstance(prog, blackbird.BlackbirdProgram): if not prog.modes: # we can't return an empty program, since we don't know how many modes # to initialize the Program object with. raise ValueError(""Blackbird program contains no quantum operations!"") if prog.programtype[""name""] == ""tdm"": return from_blackbird_to_tdm(prog) return from_blackbird(prog) if isinstance(prog, xir.Program): if prog.options.get(""type"") == ""tdm"": return from_xir_to_tdm(prog) return from_xir(prog) raise TypeError(f""Cannot convert {type(prog)}' to Strawberry Fields Program"") " 31224,"def main(): params = demisto.params() args = demisto.args() base_url = params.get('url') # checks for '/' at the end url, if it is not available add it if base_url[-1] != '/': base_url += '/' token = params.get('token') org = params.get('org') mapper_in = params.get('mapper_in', DEFAULT_INCOMING_MAPPER) mapper_out = params.get('mapper_out', DEFAULT_OUTGOING_MAPPER) is_create_enabled = params.get(""create-user-enabled"") is_disable_enabled = params.get(""disable-user-enabled"") is_update_enabled = demisto.params().get(""update-user-enabled"") create_if_not_exists = demisto.params().get(""create-if-not-exists"") verify_certificate = not demisto.params().get('insecure', False) headers = { 'accept': 'application/json', 'content-type': 'application/json', 'Authorization': f'Bearer {token}' } proxy = demisto.params().get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: client = Client( base_url=base_url, org=org, verify=verify_certificate, proxy=proxy, headers=headers, ok_codes=(200, 201, 204) ) if command == 'test-module': return_results(test_module(client)) elif command == 'iam-get-user': user_profile = get_user_command(client, args, mapper_in) user_profile.return_outputs elif command == 'iam-create-user': user_profile = create_user_command(client, args, mapper_out, is_create_enabled, is_update_enabled) user_profile.return_outputs elif command == 'iam-update-user': user_profile = update_user_command(client, args, mapper_out, is_update_enabled, 
is_create_enabled, create_if_not_exists) user_profile.return_outputs elif command == 'iam-disable-user': user_profile = disable_user_command(client, args, mapper_out, is_disable_enabled) user_profile.return_outputs elif command == 'get-mapping-fields': return_results(get_mapping_fields_command()) except Exception as e: # For any other integration command exception, return an error return_error(f'Failed to execute {command} command. Exception: {e}. Traceback: {traceback.format_exc()}') ","def main(): params = demisto.params() args = demisto.args() base_url = params.get('url') # checks for '/' at the end url, if it is not available add it if base_url[-1] != '/': base_url += '/' token = params.get('token') org = params.get('org') mapper_in = params.get('mapper_in', DEFAULT_INCOMING_MAPPER) mapper_out = params.get('mapper_out', DEFAULT_OUTGOING_MAPPER) is_create_enabled = params.get(""create-user-enabled"") is_disable_enabled = params.get(""disable-user-enabled"") is_update_enabled = demisto.params().get(""update-user-enabled"") create_if_not_exists = demisto.params().get(""create-if-not-exists"") verify_certificate = not demisto.params().get('insecure', False) headers = { 'accept': 'application/json', 'content-type': 'application/json', 'Authorization': f'Bearer {token}' } proxy = demisto.params().get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: client = Client( base_url=base_url, org=org, verify=verify_certificate, proxy=proxy, headers=headers, ok_codes=(200, 201, 204) ) if command == 'test-module': return_results(test_module(client)) elif command == 'iam-get-user': user_profile = get_user_command(client, args, mapper_in) user_profile.return_outputs() elif command == 'iam-create-user': user_profile = create_user_command(client, args, mapper_out, is_create_enabled, is_update_enabled) user_profile.return_outputs elif command == 'iam-update-user': user_profile = update_user_command(client, args, mapper_out, is_update_enabled, is_create_enabled, create_if_not_exists) user_profile.return_outputs elif command == 'iam-disable-user': user_profile = disable_user_command(client, args, mapper_out, is_disable_enabled) user_profile.return_outputs elif command == 'get-mapping-fields': return_results(get_mapping_fields_command()) except Exception as e: # For any other integration command exception, return an error return_error(f'Failed to execute {command} command. Exception: {e}. Traceback: {traceback.format_exc()}') " 38942,"def test_exclude_none_dict(): class MyModel(BaseModel): a: Optional[int] = None b: int = 2 m = MyModel(a=5) assert m.dict(exclude_none=True) == {'a': 5, 'b': 2} m = MyModel(b=3) assert m.dict(exclude_none=True) == {'b': 3} m = MyModel() assert m.dict(exclude_none=True) == {'b': 2} ","def test_exclude_none(): class MyModel(BaseModel): a: Optional[int] = None b: int = 2 m = MyModel(a=5) assert m.dict(exclude_none=True) == {'a': 5, 'b': 2} m = MyModel(b=3) assert m.dict(exclude_none=True) == {'b': 3} m = MyModel() assert m.dict(exclude_none=True) == {'b': 2} " 37661,"def level_0_pass_manager(pass_manager_config: PassManagerConfig) -> StagedPassManager: """"""Level 0 pass manager: no explicit optimization other than mapping to backend. This pass manager applies the user-given initial layout. If none is given, a trivial layout consisting of mapping the i-th virtual qubit to the i-th physical qubit is used. Any unused physical qubit is allocated as ancilla space. 
The pass manager then unrolls the circuit to the desired basis, and transforms the circuit to match the coupling map. Args: pass_manager_config: configuration of the pass manager. Returns: a level 0 pass manager. Raises: TranspilerError: if the passmanager config is invalid. """""" plugin_manager = PassManagerStagePluginManager() basis_gates = pass_manager_config.basis_gates inst_map = pass_manager_config.inst_map coupling_map = pass_manager_config.coupling_map initial_layout = pass_manager_config.initial_layout init_method = pass_manager_config.init_method layout_method = pass_manager_config.layout_method or ""trivial"" routing_method = pass_manager_config.routing_method or ""stochastic"" translation_method = pass_manager_config.translation_method or ""translator"" optimization_method = pass_manager_config.optimization_method scheduling_method = pass_manager_config.scheduling_method instruction_durations = pass_manager_config.instruction_durations seed_transpiler = pass_manager_config.seed_transpiler backend_properties = pass_manager_config.backend_properties approximation_degree = pass_manager_config.approximation_degree timing_constraints = pass_manager_config.timing_constraints or TimingConstraints() unitary_synthesis_method = pass_manager_config.unitary_synthesis_method unitary_synthesis_plugin_config = pass_manager_config.unitary_synthesis_plugin_config target = pass_manager_config.target # Choose an initial layout if not set by user (default: trivial layout) _given_layout = SetLayout(initial_layout) def _choose_layout_condition(property_set): return not property_set[""layout""] if layout_method == ""trivial"": _choose_layout = TrivialLayout(coupling_map) elif layout_method == ""dense"": _choose_layout = DenseLayout(coupling_map, backend_properties, target=target) elif layout_method == ""noise_adaptive"": _choose_layout = NoiseAdaptiveLayout(backend_properties) elif layout_method == ""sabre"": _choose_layout = SabreLayout(coupling_map, max_iterations=1, seed=seed_transpiler) toqm_pass = False # Choose routing pass # TODO: Remove when qiskit-toqm has it's own plugin and we can rely on just the plugin interface if routing_method == ""toqm"" and ""toqm"" not in list_stage_plugins(""routing""): HAS_TOQM.require_now(""TOQM-based routing"") from qiskit_toqm import ToqmSwap, ToqmStrategyO0, latencies_from_target if initial_layout: raise TranspilerError(""Initial layouts are not supported with TOQM-based routing."") toqm_pass = True # Note: BarrierBeforeFinalMeasurements is skipped intentionally since ToqmSwap # does not yet support barriers. 
routing_pass = ToqmSwap( coupling_map, strategy=ToqmStrategyO0( latencies_from_target( coupling_map, instruction_durations, basis_gates, backend_properties, target ) ), ) routing_pm = common.generate_routing_passmanager( routing_pass, target, coupling_map=coupling_map, seed_transpiler=seed_transpiler, use_barrier_before_measurement=not toqm_pass, ) else: routing_pm = plugin_manager.get_passmanager_stage( ""routing"", routing_method, pass_manager_config, optimization_level=0 ) unroll_3q = None # Build pass manager if coupling_map or initial_layout: unroll_3q = common.generate_unroll_3q( target, basis_gates, approximation_degree, unitary_synthesis_method, unitary_synthesis_plugin_config, ) if layout_method not in {""trivial"", ""dense"", ""noise_adaptive"", ""sabre""}: layout = plugin_manager.get_passmanager_stage( ""layout"", layout_method, pass_manager_config, optimization_level=0 ) else: layout = PassManager() layout.append(_given_layout) layout.append(_choose_layout, condition=_choose_layout_condition) layout += common.generate_embed_passmanager(coupling_map) routing = routing_pm else: layout = None routing = None if translation_method not in {""translator"", ""synthesis"", ""unroller""}: translation = plugin_manager.get_passmanager_stage( ""translation"", translation_method, pass_manager_config, optimization_level=0 ) else: translation = common.generate_translation_passmanager( target, basis_gates, translation_method, approximation_degree, coupling_map, backend_properties, unitary_synthesis_method, unitary_synthesis_plugin_config, ) pre_routing = None if toqm_pass: pre_routing = translation if (coupling_map and not coupling_map.is_symmetric) or ( target is not None and target.get_non_global_operation_names(strict_direction=True) ): pre_opt = common.generate_pre_op_passmanager(target, coupling_map) pre_opt += translation else: pre_opt = None if scheduling_method is None or scheduling_method in {""alap"", ""asap""}: sched = common.generate_scheduling( instruction_durations, scheduling_method, timing_constraints, inst_map ) elif isinstance(scheduling_method, PassManager): sched = scheduling_method else: sched = plugin_manager.get_passmanager_stage( ""scheduling"", scheduling_method, pass_manager_config, optimization_level=0 ) if init_method is not None: init = plugin_manager.get_passmanager_stage( ""init"", init_method, pass_manager_config, optimization_level=0 ) else: init = unroll_3q optimization = None if optimization_method is not None: optimization = plugin_manager.get_passmanager_stage( ""optimization"", optimization_method, pass_manager_config, optimization_level=0 ) post_translation = None if pass_manager_config.post_translation_pm is not None: post_translation = pass_manager_config.post_translation_pm return StagedPassManager( init=init, layout=layout, pre_routing=pre_routing, routing=routing, translation=translation, post_translation=post_translation, pre_optimization=pre_opt, optimization=optimization, scheduling=sched, ) ","def level_0_pass_manager(pass_manager_config: PassManagerConfig) -> StagedPassManager: """"""Level 0 pass manager: no explicit optimization other than mapping to backend. This pass manager applies the user-given initial layout. If none is given, a trivial layout consisting of mapping the i-th virtual qubit to the i-th physical qubit is used. Any unused physical qubit is allocated as ancilla space. The pass manager then unrolls the circuit to the desired basis, and transforms the circuit to match the coupling map. 
Args: pass_manager_config: configuration of the pass manager. Returns: a level 0 pass manager. Raises: TranspilerError: if the passmanager config is invalid. """""" plugin_manager = PassManagerStagePluginManager() basis_gates = pass_manager_config.basis_gates inst_map = pass_manager_config.inst_map coupling_map = pass_manager_config.coupling_map initial_layout = pass_manager_config.initial_layout init_method = pass_manager_config.init_method layout_method = pass_manager_config.layout_method or ""trivial"" routing_method = pass_manager_config.routing_method or ""stochastic"" translation_method = pass_manager_config.translation_method or ""translator"" optimization_method = pass_manager_config.optimization_method scheduling_method = pass_manager_config.scheduling_method instruction_durations = pass_manager_config.instruction_durations seed_transpiler = pass_manager_config.seed_transpiler backend_properties = pass_manager_config.backend_properties approximation_degree = pass_manager_config.approximation_degree timing_constraints = pass_manager_config.timing_constraints or TimingConstraints() unitary_synthesis_method = pass_manager_config.unitary_synthesis_method unitary_synthesis_plugin_config = pass_manager_config.unitary_synthesis_plugin_config target = pass_manager_config.target # Choose an initial layout if not set by user (default: trivial layout) _given_layout = SetLayout(initial_layout) def _choose_layout_condition(property_set): return not property_set[""layout""] if layout_method == ""trivial"": _choose_layout = TrivialLayout(coupling_map) elif layout_method == ""dense"": _choose_layout = DenseLayout(coupling_map, backend_properties, target=target) elif layout_method == ""noise_adaptive"": _choose_layout = NoiseAdaptiveLayout(backend_properties) elif layout_method == ""sabre"": _choose_layout = SabreLayout(coupling_map, max_iterations=1, seed=seed_transpiler) toqm_pass = False # Choose routing pass # TODO: Remove when qiskit-toqm has it's own plugin and we can rely on just the plugin interface if routing_method == ""toqm"" and ""toqm"" not in list_stage_plugins(""routing""): HAS_TOQM.require_now(""TOQM-based routing"") from qiskit_toqm import ToqmSwap, ToqmStrategyO0, latencies_from_target if initial_layout: raise TranspilerError(""Initial layouts are not supported with TOQM-based routing."") toqm_pass = True # Note: BarrierBeforeFinalMeasurements is skipped intentionally since ToqmSwap # does not yet support barriers. 
routing_pass = ToqmSwap( coupling_map, strategy=ToqmStrategyO0( latencies_from_target( coupling_map, instruction_durations, basis_gates, backend_properties, target ) ), ) routing_pm = common.generate_routing_passmanager( routing_pass, target, coupling_map=coupling_map, seed_transpiler=seed_transpiler, use_barrier_before_measurement=not toqm_pass, ) else: routing_pm = plugin_manager.get_passmanager_stage( ""routing"", routing_method, pass_manager_config, optimization_level=0 ) unroll_3q = None # Build pass manager if coupling_map or initial_layout: unroll_3q = common.generate_unroll_3q( target, basis_gates, approximation_degree, unitary_synthesis_method, unitary_synthesis_plugin_config, ) if layout_method not in {""trivial"", ""dense"", ""noise_adaptive"", ""sabre""}: layout = plugin_manager.get_passmanager_stage( ""layout"", layout_method, pass_manager_config, optimization_level=0 ) else: layout = PassManager() layout.append(_given_layout) layout.append(_choose_layout, condition=_choose_layout_condition) layout += common.generate_embed_passmanager(coupling_map) routing = routing_pm else: layout = None routing = None if translation_method not in {""translator"", ""synthesis"", ""unroller""}: translation = plugin_manager.get_passmanager_stage( ""translation"", translation_method, pass_manager_config, optimization_level=0 ) else: translation = common.generate_translation_passmanager( target, basis_gates, translation_method, approximation_degree, coupling_map, backend_properties, unitary_synthesis_method, unitary_synthesis_plugin_config, ) pre_routing = None if toqm_pass: pre_routing = translation if (coupling_map and not coupling_map.is_symmetric) or ( target is not None and target.get_non_global_operation_names(strict_direction=True) ): pre_opt = common.generate_pre_op_passmanager(target, coupling_map) pre_opt += translation else: pre_opt = None if scheduling_method is None or scheduling_method in {""alap"", ""asap""}: sched = common.generate_scheduling( instruction_durations, scheduling_method, timing_constraints, inst_map ) elif isinstance(scheduling_method, PassManager): sched = scheduling_method else: sched = plugin_manager.get_passmanager_stage( ""scheduling"", scheduling_method, pass_manager_config, optimization_level=0 ) if init_method is not None: init = plugin_manager.get_passmanager_stage( ""init"", init_method, pass_manager_config, optimization_level=0 ) else: init = unroll_3q optimization = None if optimization_method is not None: optimization = plugin_manager.get_passmanager_stage( ""optimization"", optimization_method, pass_manager_config, optimization_level=0 ) post_translation = pass_manager_config.post_translation_pm return StagedPassManager( init=init, layout=layout, pre_routing=pre_routing, routing=routing, translation=translation, post_translation=post_translation, pre_optimization=pre_opt, optimization=optimization, scheduling=sched, ) " 5329,"def add(event, reactors, saltenv=""base"", test=None): """""" Add a new reactor CLI Example: .. 
code-block:: bash salt-run reactor.add 'salt/cloud/*/destroyed' reactors='/srv/reactor/destroy/*.sls' """""" if not _reactor_system_available(): raise CommandExecutionError(""Reactor system is not running."") if isinstance(reactors, str): reactors = [reactors] with salt.utils.event.get_event( ""master"", __opts__[""sock_dir""], __opts__[""transport""], opts=__opts__, listen=True, ) as sevent: master_key = salt.utils.master.get_master_key(""root"", __opts__) __jid_event__.fire_event( {""event"": event, ""reactors"": reactors, ""key"": master_key}, ""salt/reactors/manage/add"", ) res = sevent.get_event(wait=30, tag=""salt/reactors/manage/add-complete"") return res[""result""] return res.get('result', None) ","def add(event, reactors, saltenv=""base"", test=None): """""" Add a new reactor CLI Example: .. code-block:: bash salt-run reactor.add 'salt/cloud/*/destroyed' reactors='/srv/reactor/destroy/*.sls' """""" if not _reactor_system_available(): raise CommandExecutionError(""Reactor system is not running."") if isinstance(reactors, str): reactors = [reactors] with salt.utils.event.get_event( ""master"", __opts__[""sock_dir""], __opts__[""transport""], opts=__opts__, listen=True, ) as sevent: master_key = salt.utils.master.get_master_key(""root"", __opts__) __jid_event__.fire_event( {""event"": event, ""reactors"": reactors, ""key"": master_key}, ""salt/reactors/manage/add"", ) res = sevent.get_event(wait=30, tag=""salt/reactors/manage/add-complete"") return res[""result""] return res.get(""result"") " 30025,"def _check_box_action(action_space: Box): """"""Checks that box action space. Args: action_space: A box action space """""" if np.any(np.equal(action_space.low, -np.inf)): logger.warn( ""Agent's minimum action space value is -infinity. This is probably too low."" ) if np.any(np.equal(action_space.high, np.inf)): logger.warn( ""Agent's maximum action space value is infinity. This is probably too high"" ) if np.any(np.equal(action_space.low, action_space.high)): logger.warn(""Agent's maximum and minimum action space values are equal"") if np.any(np.greater(action_space.low, action_space.high)): assert False, ""Agent's minimum action value is greater than it's maximum"" if action_space.low.shape != action_space.shape: assert False, ""Agent's action_space.low and action_space have different shapes"" if action_space.high.shape != action_space.shape: assert False, ""Agent's action_space.high and action_space have different shapes"" ","def _check_box_action(action_space: Box): """"""Checks that a :class:`Box` action space is defined in a sensible way. Args: action_space: A box action space """""" if np.any(np.equal(action_space.low, -np.inf)): logger.warn( ""Agent's minimum action space value is -infinity. This is probably too low."" ) if np.any(np.equal(action_space.high, np.inf)): logger.warn( ""Agent's maximum action space value is infinity. 
This is probably too high"" ) if np.any(np.equal(action_space.low, action_space.high)): logger.warn(""Agent's maximum and minimum action space values are equal"") if np.any(np.greater(action_space.low, action_space.high)): assert False, ""Agent's minimum action value is greater than it's maximum"" if action_space.low.shape != action_space.shape: assert False, ""Agent's action_space.low and action_space have different shapes"" if action_space.high.shape != action_space.shape: assert False, ""Agent's action_space.high and action_space have different shapes"" " 50034,"def get_master(full_table_name): """""" If the table name is that of a part table, then return what the master table name would be. :param full_table_name: :return: Supposed master full table name or empty string if not a part table name. This follows DataJoint's table naming convention where a master and a part must be in the same schema and the part table is prefixed with the mater table name + '__'. Example: `ephys`.`session` -- master `ephys`.`session__recording` -- part """""" match = re.match(r'(?P`\w+`.`\w+)__(?P\w+)`', full_table_name) return match['master'] + '`' if match else '' ","def get_master(full_table_name: str) -> str: """""" If the table name is that of a part table, then return what the master table name would be. This follows DataJoint's table naming convention where a master and a part must be in the same schema and the part table is prefixed with the master table name + ``__``. Example: `ephys`.`session` -- master `ephys`.`session__recording` -- part :param full_table_name: Full table name including part. :type full_table_name: str :return: Supposed master full table name or empty string if not a part table name. :rtype: str """""" match = re.match(r'(?P`\w+`.`\w+)__(?P\w+)`', full_table_name) return match['master'] + '`' if match else '' " 58791,"def pipeline_executor_enabled(): """"""check if pipeline executor enabled. Return ------ enable: bool return pipeline executor get enabled or not """""" pipeline_enabled = False try: pipelinecreate = tvm._ffi.get_global_func(""tvm.pipeline_executor.create"") assert pipelinecreate pipeline_enabled = True except ValueError: print(""pipeline executor not enabled!"") return pipeline_enabled ","def pipeline_executor_enabled(): """"""check if pipeline executor is enabled. Return ------- enable: bool Return pipeline executor is enabled or not. 
"""""" pipeline_enabled = False try: pipelinecreate = tvm._ffi.get_global_func(""tvm.pipeline_executor.create"") assert pipelinecreate pipeline_enabled = True except ValueError: print(""pipeline executor not enabled!"") return pipeline_enabled " 20027,"def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None): """""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance img1: 1st image to be fused img2: 2nd image to be fused wvs1: list of wavelengths represent bands in img1 wvs2: list of wavelengths represent bands in img2 array_type: (optional) description of the fused array filename: (optional) desired filename of the fused array :param img1: np.ndarray :param img2: np.ndarray :param wvs1: list :param wvs2: list :param array_type: str :param filename: str :return: fused_array (a Spectral_data instance) """""" if len(img1.shape) == 2: img1 = np.expand_dims(img1,axis=2) r1, c1, b1 = img1.shape if len(img2.shape) == 2: img2 = np.expand_dims(img2,axis=2) r2, c2, b2 = img2.shape if (r1,c1) != (r2,c2): fatal_error(""Input images should have the same image size"") array_data = np.concatenate((img1, img2), axis=2) # sort all wavelengths wavelengths = np.array(wvs1 + wvs2) ind = np.argsort(wavelengths) wavelengths = wavelengths[ind] wavelength_dict = dict() for (idx, wv) in enumerate(wavelengths): wavelength_dict[wv] = float(idx) # sort array_data based on wavelengths array_data = array_data[:,:,ind] array_data = (array_data / 255).astype(np.float32) max_pixel = float(np.amax(array_data)) min_pixel = float(np.amin(array_data)) d_type = array_data.dtype r, c, b = array_data.shape fused_array = Spectral_data(array_data=array_data, max_wavelength=float(max(wavelengths)), min_wavelength=float(min(wavelengths)), max_value=max_pixel, min_value=min_pixel, d_type=d_type, wavelength_dict=wavelength_dict, samples=int(r * c), lines=int(b), interleave=""bil"", wavelength_units=""nm"", array_type=array_type, pseudo_rgb=None, filename=filename, default_bands=None) # Make pseudo-rgb image and replace it inside the class instance object pseudo_rgb = _make_pseudo_rgb(fused_array) fused_array.pseudo_rgb = pseudo_rgb _debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + ""_fused_pseudo_rgb.png"")) return fused_array ","def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None): """""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance img1: 1st image to be fused img2: 2nd image to be fused wvs1: list of wavelengths represent bands in img1 wvs2: list of wavelengths represent bands in img2 array_type: (optional) description of the fused array filename: (optional) desired filename of the fused array :param img1: np.ndarray :param img2: np.ndarray :param wvs1: list :param wvs2: list :param array_type: str :param filename: str :return: fused_array (a Spectral_data instance) """""" if len(img1.shape) == 2: img1 = np.expand_dims(img1,axis=2) r1, c1, b1 = img1.shape if len(img2.shape) == 2: img2 = np.expand_dims(img2, axis=2) r2, c2, b2 = img2.shape if (r1,c1) != (r2,c2): fatal_error(""Input images should have the same image size"") array_data = np.concatenate((img1, img2), axis=2) # sort all wavelengths wavelengths = np.array(wvs1 + wvs2) ind = np.argsort(wavelengths) wavelengths = wavelengths[ind] wavelength_dict = dict() for (idx, wv) in enumerate(wavelengths): wavelength_dict[wv] = float(idx) # sort array_data based on wavelengths 
array_data = array_data[:,:,ind] array_data = (array_data / 255).astype(np.float32) max_pixel = float(np.amax(array_data)) min_pixel = float(np.amin(array_data)) d_type = array_data.dtype r, c, b = array_data.shape fused_array = Spectral_data(array_data=array_data, max_wavelength=float(max(wavelengths)), min_wavelength=float(min(wavelengths)), max_value=max_pixel, min_value=min_pixel, d_type=d_type, wavelength_dict=wavelength_dict, samples=int(r * c), lines=int(b), interleave=""bil"", wavelength_units=""nm"", array_type=array_type, pseudo_rgb=None, filename=filename, default_bands=None) # Make pseudo-rgb image and replace it inside the class instance object pseudo_rgb = _make_pseudo_rgb(fused_array) fused_array.pseudo_rgb = pseudo_rgb _debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + ""_fused_pseudo_rgb.png"")) return fused_array " 58377,"def auto_model(layout, scan_length=None, one_vs_rest=False): """"""Create a simple default model for each of the tasks in a BIDSLayout. Contrasts each trial type against all other trial types and trial types at the run level and then uses dummy contrasts at each other level present to aggregate these results up. Parameters ---------- layout : :obj:`bids.layout.BIDSLayout` A BIDSLayout instance scan_length : int Scan length for loading event variables in cases where the scan length can not be read from the nifti. Primarily for testing. one_vs_rest : bool Set to True if you would like to autogenerate contrasts of each trial type against everyother trialtype. Returns ------- list list of model dictionaries for each task """""" base_name = layout._root.name tasks = layout.entities['task'].unique() task_models = [] for task_name in tasks: # Populate model meta-data model = OrderedDict() model[""Name""] = ""_"".join([base_name, task_name]) model[""Description""] = (""Autogenerated model for the %s task from %s"" % (task_name, base_name)) model[""BIDSModelVersion""]= ""1.0.0"" model[""Input""] = {""task"": [task_name]} nodes = [] # Make run level block transformations = OrderedDict( Transformer='pybids-transforms-v1', Instructions=[ OrderedDict( Name='Factor', Input='trial_type' ) ] ) run = OrderedDict(Level='Run', Name='Run', GroupBy=['run', 'subject'], Transformations=transformations) # Get trial types run_nodes = load_variables(layout, task=task_name, levels=['run'], scan_length=scan_length) evs = [] for n in run_nodes.nodes: evs.extend(n.variables['trial_type'].values.values) trial_types = np.unique(evs) trial_type_factors = [""trial_type."" + tt for tt in trial_types] run_model = OrderedDict(Type='glm', X=trial_type_factors) # Add HRF run_model['HRF'] = OrderedDict( Variables=trial_type_factors, Model=""DoubleGamma"", Parameters=OrderedDict( PeakDelay=3, PeakDispersion=6, UndershootDelay=10, UndershootDispersion=12, PeakUndershootRatio=0.2 ) ) run[""Model""] = run_model if one_vs_rest: # If there are multiple trial types, build contrasts contrasts = [] for tt in trial_types: cdict = OrderedDict() if len(trial_types) > 1: cdict[""Name""] = ""run_"" + tt + ""_vs_others"" else: cdict[""Name""] = ""run_"" + tt cdict[""ConditionList""] = trial_type_factors # Calculate weights for contrast weights = np.ones(len(trial_types)) try: weights[trial_types != tt] = -1.0 / (len(trial_types) - 1) except ZeroDivisionError: pass cdict[""Weights""] = list(weights) cdict[""Test""] = ""t"" contrasts.append(cdict) run[""Contrasts""] = contrasts nodes.append(run) if one_vs_rest: # if there are multiple sessions, t-test run level contrasts 
at # session level sessions = layout.get_sessions() if len(sessions) > 1: # get contrasts names from previous block contrast_names = [cc[""Name""] for cc in nodes[-1][""Contrasts""]] nodes.append(_make_passthrough_contrast( ""Session"", contrast_names)) subjects = layout.get_subjects() if len(subjects) > 1: # get contrasts names from previous block contrast_names = [cc[""Name""] for cc in nodes[-1][""Contrasts""]] nodes.append(_make_passthrough_contrast( ""Subject"", contrast_names)) # get contrasts names from previous block contrast_names = [cc[""Name""] for cc in nodes[-1][""Contrasts""]] nodes.append(_make_passthrough_contrast( ""Dataset"", contrast_names)) model[""Nodes""] = nodes task_models.append(model) return task_models ","def auto_model(layout, scan_length=None, one_vs_rest=False): """"""Create a simple default model for each of the tasks in a BIDSLayout. Contrasts each trial type against all other trial types and trial types at the run level and then uses dummy contrasts at each other level present to aggregate these results up. Parameters ---------- layout : :obj:`bids.layout.BIDSLayout` A BIDSLayout instance scan_length : int Scan length for loading event variables in cases where the scan length can not be read from the nifti. Primarily for testing. one_vs_rest : bool Set to True if you would like to autogenerate contrasts of each trial type against everyother trialtype. Returns ------- list list of model dictionaries for each task """""" base_name = layout._root.name tasks = layout.entities['task'].unique() task_models = [] for task_name in tasks: # Populate model meta-data model = OrderedDict() model[""Name""] = ""_"".join([base_name, task_name]) model[""Description""] = (""Autogenerated model for the %s task from %s"" % (task_name, base_name)) model[""BIDSModelVersion""]= ""1.0.0"" model[""Input""] = {""task"": [task_name]} nodes = [] # Make run level block transformations = dict( Transformer='pybids-transforms-v1', Instructions=[ dict( Name='Factor', Input='trial_type' ) ] ) run = dict(Level='Run', Name='Run', GroupBy=['run', 'subject'], Transformations=transformations) # Get trial types run_nodes = load_variables(layout, task=task_name, levels=['run'], scan_length=scan_length) evs = [] for n in run_nodes.nodes: evs.extend(n.variables['trial_type'].values.values) trial_types = np.unique(evs) trial_type_factors = [""trial_type."" + tt for tt in trial_types] run_model = OrderedDict(Type='glm', X=trial_type_factors) # Add HRF run_model['HRF'] = OrderedDict( Variables=trial_type_factors, Model=""DoubleGamma"", Parameters=OrderedDict( PeakDelay=3, PeakDispersion=6, UndershootDelay=10, UndershootDispersion=12, PeakUndershootRatio=0.2 ) ) run[""Model""] = run_model if one_vs_rest: # If there are multiple trial types, build contrasts contrasts = [] for tt in trial_types: cdict = OrderedDict() if len(trial_types) > 1: cdict[""Name""] = ""run_"" + tt + ""_vs_others"" else: cdict[""Name""] = ""run_"" + tt cdict[""ConditionList""] = trial_type_factors # Calculate weights for contrast weights = np.ones(len(trial_types)) try: weights[trial_types != tt] = -1.0 / (len(trial_types) - 1) except ZeroDivisionError: pass cdict[""Weights""] = list(weights) cdict[""Test""] = ""t"" contrasts.append(cdict) run[""Contrasts""] = contrasts nodes.append(run) if one_vs_rest: # if there are multiple sessions, t-test run level contrasts at # session level sessions = layout.get_sessions() if len(sessions) > 1: # get contrasts names from previous block contrast_names = [cc[""Name""] for cc in 
nodes[-1][""Contrasts""]] nodes.append(_make_passthrough_contrast( ""Session"", contrast_names)) subjects = layout.get_subjects() if len(subjects) > 1: # get contrasts names from previous block contrast_names = [cc[""Name""] for cc in nodes[-1][""Contrasts""]] nodes.append(_make_passthrough_contrast( ""Subject"", contrast_names)) # get contrasts names from previous block contrast_names = [cc[""Name""] for cc in nodes[-1][""Contrasts""]] nodes.append(_make_passthrough_contrast( ""Dataset"", contrast_names)) model[""Nodes""] = nodes task_models.append(model) return task_models " 5807,"def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8): """""" Compute a double integral. Return the double (definite) integral of ``func(y, x)`` from ``x = a..b`` and ``y = gfun(x)..hfun(x)``. Parameters ---------- func : callable A Python function or method of at least two variables: y must be the first argument and x the second argument. a, b : float The limits of integration in x: `a` < `b` gfun : callable or float The lower boundary curve in y which is a function taking a single floating point argument (x) and returning a floating point result or a float indicating a constant boundary curve. hfun : callable or float The upper boundary curve in y (same requirements as `gfun`). args : sequence, optional Extra arguments to pass to `func`. epsabs : float, optional Absolute tolerance passed directly to the inner 1-D quadrature integration. Default is 1.49e-8. ``dblquad`` tries to obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)`` to ``hfun(x)``, and ``result`` is the numerical approximation. See `epsrel` below. epsrel : float, optional Relative tolerance of the inner 1-D integrals. Default is 1.49e-8. If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29 and ``50 * (machine epsilon)``. See `epsabs` above. Returns ------- y : float The resultant integral. abserr : float An estimate of the error. See Also -------- quad : single integral tplquad : triple integral nquad : N-dimensional integrals fixed_quad : fixed-order Gaussian quadrature quadrature : adaptive Gaussian quadrature odeint : ODE integrator ode : ODE integrator simpson : integrator for sampled data romb : integrator for sampled data scipy.special : for coefficients and roots of orthogonal polynomials Examples -------- Compute the double integral of ``x * y**2`` over the box ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1. That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 dy dx`. >>> from scipy import integrate >>> f = lambda y, x: x*y**2 >>> integrate.dblquad(f, 0, 2, 0, 1) (0.6666666666666667, 7.401486830834377e-15) Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 dy dx`. >>> from numpy import pi, cos, sin >>> f = lambda y, x: 1 >>> integrate.dblquad(f, 0, pi/4, sin, cos) (0.41421356237309503, 1.1083280054755938e-14) Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=x}_{y=2-x} a x y dy dx` for :math:`a=1, 3`. 
>>> f = lambda y, x, a: a*x*y >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,)) (0.33333333333333337, 5.551115123125783e-15) >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,)) (0.9999999999999999, 1.6653345369377348e-14) """""" def temp_ranges(*args): return [gfun(args[0]) if callable(gfun) else gfun, hfun(args[0]) if callable(hfun) else hfun] return nquad(func, [temp_ranges, [a, b]], args=args, opts={""epsabs"": epsabs, ""epsrel"": epsrel}) ","def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8): """""" Compute a double integral. Return the double (definite) integral of ``func(y, x)`` from ``x = a..b`` and ``y = gfun(x)..hfun(x)``. Parameters ---------- func : callable A Python function or method of at least two variables: y must be the first argument and x the second argument. a, b : float The limits of integration in x: `a` < `b` gfun : callable or float The lower boundary curve in y which is a function taking a single floating point argument (x) and returning a floating point result or a float indicating a constant boundary curve. hfun : callable or float The upper boundary curve in y (same requirements as `gfun`). args : sequence, optional Extra arguments to pass to `func`. epsabs : float, optional Absolute tolerance passed directly to the inner 1-D quadrature integration. Default is 1.49e-8. ``dblquad`` tries to obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)`` to ``hfun(x)``, and ``result`` is the numerical approximation. See `epsrel` below. epsrel : float, optional Relative tolerance of the inner 1-D integrals. Default is 1.49e-8. If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29 and ``50 * (machine epsilon)``. See `epsabs` above. Returns ------- y : float The resultant integral. abserr : float An estimate of the error. See Also -------- quad : single integral tplquad : triple integral nquad : N-dimensional integrals fixed_quad : fixed-order Gaussian quadrature quadrature : adaptive Gaussian quadrature odeint : ODE integrator ode : ODE integrator simpson : integrator for sampled data romb : integrator for sampled data scipy.special : for coefficients and roots of orthogonal polynomials Examples -------- Compute the double integral of ``x * y**2`` over the box ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1. That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 dy dx`. >>> from scipy import integrate >>> f = lambda y, x: x*y**2 >>> integrate.dblquad(f, 0, 2, 0, 1) (0.6666666666666667, 7.401486830834377e-15) Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 \,dy \,dx`. >>> from numpy import pi, cos, sin >>> f = lambda y, x: 1 >>> integrate.dblquad(f, 0, pi/4, sin, cos) (0.41421356237309503, 1.1083280054755938e-14) Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=x}_{y=2-x} a x y dy dx` for :math:`a=1, 3`. 
>>> f = lambda y, x, a: a*x*y >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,)) (0.33333333333333337, 5.551115123125783e-15) >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,)) (0.9999999999999999, 1.6653345369377348e-14) """""" def temp_ranges(*args): return [gfun(args[0]) if callable(gfun) else gfun, hfun(args[0]) if callable(hfun) else hfun] return nquad(func, [temp_ranges, [a, b]], args=args, opts={""epsabs"": epsabs, ""epsrel"": epsrel}) " 45728,"def l1_param(ob, pre): """"""This function calculates the first parameter of location component for SAL based on Wernli et al (2008). This parameter indicates the normalized distance between the center of mass in observation and forecast. Parameters ---------- ob: 2-d ndarray for the observation data. pre: 2-d ndarray for the prediction data. max_distance: Maximum distance of the study domain in kilometers. Returns ------- l1: The first parameter of location component which has a value between 0 to 1. """""" maximum_distance = sqrt(((ob.shape[0]) ** 2) + ((ob.shape[1]) ** 2)) obi = c_m(ob) fori = c_m(pre) dist = hypot(fori[1] - obi[1], fori[0] - obi[0]) l1 = dist / maximum_distance return l1 ","def l1_param(ob, pre): """"""Calculate the first parameter of location component for SAL based on Wernli et al (2008). This parameter indicates the normalized distance between the center of mass in observation and forecast. Parameters ---------- ob: 2-d ndarray for the observation data. pre: 2-d ndarray for the prediction data. max_distance: Maximum distance of the study domain in kilometers. Returns ------- l1: The first parameter of location component which has a value between 0 to 1. """""" maximum_distance = sqrt(((ob.shape[0]) ** 2) + ((ob.shape[1]) ** 2)) obi = c_m(ob) fori = c_m(pre) dist = hypot(fori[1] - obi[1], fori[0] - obi[0]) l1 = dist / maximum_distance return l1 " 36254,"def rank_genes_groups( adata: AnnData, groupby: str, use_raw: bool = True, groups: Union[Literal['all'], Iterable[str]] = 'all', reference: str = 'rest', n_genes: int = 100, rankby_abs: bool = False, key_added: Optional[str] = None, copy: bool = False, method: _Method = 't-test', corr_method: _CorrMethod = 'benjamini-hochberg', layer: Optional[str] = None, **kwds, ) -> Optional[AnnData]: """"""\ Rank genes for characterizing groups. Parameters ---------- adata Annotated data matrix. groupby The key of the observations grouping to consider. use_raw Use `raw` attribute of `adata` if present. layer Key from `adata.layers` whose value will be used to perform tests on. groups Subset of groups, e.g. [`'g1'`, `'g2'`, `'g3'`], to which comparison shall be restricted, or `'all'` (default), for all groups. reference If `'rest'`, compare each group to the union of the rest of the group. If a group identifier, compare with respect to this group. n_genes The number of genes that appear in the returned tables. method The default 't-test_overestim_var' overestimates variance of each group, `'t-test'` uses t-test, `'wilcoxon'` uses Wilcoxon rank-sum, `'logreg'` uses logistic regression. See [Ntranos18]_, `here `__ and `here `__, for why this is meaningful. corr_method p-value correction method. Used only for `'t-test'`, `'t-test_overestim_var'`, and `'wilcoxon'`. rankby_abs Rank genes by the absolute value of the score, not by the score. The returned scores are never the absolute values. key_added The key in `adata.uns` information is saved to. **kwds Are passed to test methods. 
Currently this affects only parameters that are passed to :class:`sklearn.linear_model.LogisticRegression`. For instance, you can pass `penalty='l1'` to try to come up with a minimal set of genes that are good predictors (sparse solution meaning few non-zero fitted coefficients). Returns ------- **names** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the gene names. Ordered according to scores. **scores** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the z-score underlying the computation of a p-value for each gene for each group. Ordered according to scores. **logfoldchanges** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the log2 fold change for each gene for each group. Ordered according to scores. Only provided if method is 't-test' like. Note: this is an approximation calculated from mean-log values. **pvals** : structured `np.ndarray` (`.uns['rank_genes_groups']`) p-values. **pvals_adj** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Corrected p-values. Notes ----- There are slight inconsistencies depending on whether sparse or dense data are passed. See `here `__. Examples -------- >>> import scanpy as sc >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon') # to visualize the results >>> sc.pl.rank_genes_groups(adata) """""" logg.warning( ""Default of the method has been changed to 't-test' from 't-test_overestim_var'"" ) if 'only_positive' in kwds: rankby_abs = not kwds.pop('only_positive') # backwards compat start = logg.info('ranking genes') avail_methods = {'t-test', 't-test_overestim_var', 'wilcoxon', 'logreg'} if method not in avail_methods: raise ValueError(f'Method must be one of {avail_methods}.') avail_corr = {'benjamini-hochberg', 'bonferroni'} if corr_method not in avail_corr: raise ValueError(f'Correction method must be one of {avail_corr}.') adata = adata.copy() if copy else adata _utils.sanitize_anndata(adata) # for clarity, rename variable if groups == 'all': groups_order = 'all' elif isinstance(groups, (str, int)): raise ValueError('Specify a sequence of groups') else: groups_order = list(groups) if isinstance(groups_order[0], int): groups_order = [str(n) for n in groups_order] if reference != 'rest' and reference not in set(groups_order): groups_order += [reference] if reference != 'rest' and reference not in set(adata.obs[groupby].cat.categories): cats = adata.obs[groupby].cat.categories.tolist() raise ValueError( f'reference = {reference} needs to be one of groupby = {cats}.' 
) if key_added is None: key_added = 'rank_genes_groups' adata.uns[key_added] = {} adata.uns[key_added]['params'] = dict( groupby=groupby, reference=reference, method=method, use_raw=use_raw, layer=layer, corr_method=corr_method, ) test_obj = _RankGenesGroups(adata, groups_order, groupby, reference, use_raw, layer) # for clarity, rename variable n_genes_user = n_genes # make sure indices are not OoB in case there are less genes than n_genes if n_genes_user > test_obj.X.shape[1]: n_genes_user = test_obj.X.shape[1] ns = np.count_nonzero(test_obj.groups_masks, axis=1) logg.debug(f'consider {groupby!r} groups:') logg.debug(f'with sizes: {ns}') del ns test_obj.compute_statistics(method, corr_method, n_genes_user, rankby_abs, **kwds) if test_obj.pts is not None: groups_names = [str(name) for name in test_obj.groups_order] adata.uns[key_added]['pts'] = pd.DataFrame( test_obj.pts.T, index=test_obj.var_names, columns=groups_names ) if test_obj.pts_rest is not None: adata.uns[key_added]['pts_rest'] = pd.DataFrame( test_obj.pts_rest.T, index=test_obj.var_names, columns=groups_names ) groups_names = test_obj.groups_names n_groups = len(groups_names) dtypes = { 'names': ['U50'] * n_groups, 'scores': ['float32'] * n_groups, 'logfoldchanges': ['float32'] * n_groups, 'pvals': ['float64'] * n_groups, 'pvals_adj': ['float64'] * n_groups, } for k in test_obj.d: adata.uns[key_added][k] = np.rec.fromarrays( test_obj.d[k].values(), names=groups_names, formats=dtypes[k] ) logg.info( ' finished', time=start, deep=( f'added to `.uns[{key_added!r}]`\n' "" 'names', sorted np.recarray to be indexed by group ids\n"" "" 'scores', sorted np.recarray to be indexed by group ids\n"" + ( "" 'logfoldchanges', sorted np.recarray to be indexed by group ids\n"" "" 'pvals', sorted np.recarray to be indexed by group ids\n"" "" 'pvals_adj', sorted np.recarray to be indexed by group ids"" if method in {'t-test', 't-test_overestim_var', 'wilcoxon'} else '' ) ), ) return adata if copy else None ","def rank_genes_groups( adata: AnnData, groupby: str, use_raw: bool = True, groups: Union[Literal['all'], Iterable[str]] = 'all', reference: str = 'rest', n_genes: int = 100, rankby_abs: bool = False, key_added: Optional[str] = None, copy: bool = False, method: _Method = 't-test', corr_method: _CorrMethod = 'benjamini-hochberg', layer: Optional[str] = None, **kwds, ) -> Optional[AnnData]: """"""\ Rank genes for characterizing groups. Parameters ---------- adata Annotated data matrix. groupby The key of the observations grouping to consider. use_raw Use `raw` attribute of `adata` if present. layer Key from `adata.layers` whose value will be used to perform tests on. groups Subset of groups, e.g. [`'g1'`, `'g2'`, `'g3'`], to which comparison shall be restricted, or `'all'` (default), for all groups. reference If `'rest'`, compare each group to the union of the rest of the group. If a group identifier, compare with respect to this group. n_genes The number of genes that appear in the returned tables. method The default 't-test_overestim_var' overestimates variance of each group, `'t-test'` uses t-test, `'wilcoxon'` uses Wilcoxon rank-sum, `'logreg'` uses logistic regression. See [Ntranos18]_, `here `__ and `here `__, for why this is meaningful. corr_method p-value correction method. Used only for `'t-test'`, `'t-test_overestim_var'`, and `'wilcoxon'`. rankby_abs Rank genes by the absolute value of the score, not by the score. The returned scores are never the absolute values. key_added The key in `adata.uns` information is saved to. 
**kwds Are passed to test methods. Currently this affects only parameters that are passed to :class:`sklearn.linear_model.LogisticRegression`. For instance, you can pass `penalty='l1'` to try to come up with a minimal set of genes that are good predictors (sparse solution meaning few non-zero fitted coefficients). Returns ------- **names** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the gene names. Ordered according to scores. **scores** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the z-score underlying the computation of a p-value for each gene for each group. Ordered according to scores. **logfoldchanges** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the log2 fold change for each gene for each group. Ordered according to scores. Only provided if method is 't-test' like. Note: this is an approximation calculated from mean-log values. **pvals** : structured `np.ndarray` (`.uns['rank_genes_groups']`) p-values. **pvals_adj** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Corrected p-values. Notes ----- There are slight inconsistencies depending on whether sparse or dense data are passed. See `here `__. Examples -------- >>> import scanpy as sc >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon') # to visualize the results >>> sc.pl.rank_genes_groups(adata) """""" logg.warning( ""Default of the method has been changed to 't-test' from 't-test_overestim_var'"" ) if 'only_positive' in kwds: rankby_abs = not kwds.pop('only_positive') # backwards compat start = logg.info('ranking genes') avail_methods = {'t-test', 't-test_overestim_var', 'wilcoxon', 'logreg'} if method not in avail_methods: raise ValueError(f'Method must be one of {avail_methods}.') avail_corr = {'benjamini-hochberg', 'bonferroni'} if corr_method not in avail_corr: raise ValueError(f'Correction method must be one of {avail_corr}.') adata = adata.copy() if copy else adata _utils.sanitize_anndata(adata) # for clarity, rename variable if groups == 'all': groups_order = 'all' elif isinstance(groups, (str, int)): raise ValueError('Specify a sequence of groups') else: groups_order = list(groups) if isinstance(groups_order[0], int): groups_order = [str(n) for n in groups_order] if reference != 'rest' and reference not in set(groups_order): groups_order += [reference] if reference != 'rest' and reference not in adata.obs[groupby]: cats = adata.obs[groupby].cat.categories.tolist() raise ValueError( f'reference = {reference} needs to be one of groupby = {cats}.' 
) if key_added is None: key_added = 'rank_genes_groups' adata.uns[key_added] = {} adata.uns[key_added]['params'] = dict( groupby=groupby, reference=reference, method=method, use_raw=use_raw, layer=layer, corr_method=corr_method, ) test_obj = _RankGenesGroups(adata, groups_order, groupby, reference, use_raw, layer) # for clarity, rename variable n_genes_user = n_genes # make sure indices are not OoB in case there are less genes than n_genes if n_genes_user > test_obj.X.shape[1]: n_genes_user = test_obj.X.shape[1] ns = np.count_nonzero(test_obj.groups_masks, axis=1) logg.debug(f'consider {groupby!r} groups:') logg.debug(f'with sizes: {ns}') del ns test_obj.compute_statistics(method, corr_method, n_genes_user, rankby_abs, **kwds) if test_obj.pts is not None: groups_names = [str(name) for name in test_obj.groups_order] adata.uns[key_added]['pts'] = pd.DataFrame( test_obj.pts.T, index=test_obj.var_names, columns=groups_names ) if test_obj.pts_rest is not None: adata.uns[key_added]['pts_rest'] = pd.DataFrame( test_obj.pts_rest.T, index=test_obj.var_names, columns=groups_names ) groups_names = test_obj.groups_names n_groups = len(groups_names) dtypes = { 'names': ['U50'] * n_groups, 'scores': ['float32'] * n_groups, 'logfoldchanges': ['float32'] * n_groups, 'pvals': ['float64'] * n_groups, 'pvals_adj': ['float64'] * n_groups, } for k in test_obj.d: adata.uns[key_added][k] = np.rec.fromarrays( test_obj.d[k].values(), names=groups_names, formats=dtypes[k] ) logg.info( ' finished', time=start, deep=( f'added to `.uns[{key_added!r}]`\n' "" 'names', sorted np.recarray to be indexed by group ids\n"" "" 'scores', sorted np.recarray to be indexed by group ids\n"" + ( "" 'logfoldchanges', sorted np.recarray to be indexed by group ids\n"" "" 'pvals', sorted np.recarray to be indexed by group ids\n"" "" 'pvals_adj', sorted np.recarray to be indexed by group ids"" if method in {'t-test', 't-test_overestim_var', 'wilcoxon'} else '' ) ), ) return adata if copy else None " 5797,"def _rename_parameter(new_name, old_name, dep_version=None): """""" Generate decorator for function with recently-renamed parameter. Apply the decorator generated by `_rename_parameter` to functions with a recently renamed parameter. After decoration, the function behaves as follows: If only the new parameter is passed into the function, behave as usual. If only the old parameter is passed into the function (as a keyword), raise a DeprecationWarning if `dep_version` is provided, and behave as usual otherwise. If both old and new parameters are passed into the function, raise a DeprecationWarning if `dep_version` is provided, and raise the appropriate TypeError (function got multiple values for argument). Parameters ---------- new_name : str New name of parameter old_name : str Old name of parameter dep_version : str, optional Version of SciPy in which old parameter was deprecated Notes ----- Untested with functions that accept *args. Probably won't work as written. 
"""""" def decorator(fun): @functools.wraps(fun) def wrapper(*args, **kwds): # Check for intersection between positional and keyword args params = list(inspect.signature(fun).parameters) d_args = dict(zip(params, args)) intersection = set(d_args) & set(kwds) if intersection: message = (f""{fun.__name__}() got multiple values "" f""for argument '{list(intersection)[0]}'"") raise TypeError(message) # Consolidate other positional and keyword args into `kwds` kwds.update(d_args) new_param = kwds.get(new_name, None) got_new = new_param is not None got_keyword_old = kwds.get(old_name, None) is not None if got_keyword_old and dep_version: message = (f""Use of keyword argument `{old_name}` is "" f""deprecated and replaced by `{new_name}`. "" f""Support for `{old_name}` will be removed two "" f""feature releases after SciPy {dep_version}."") warnings.warn(message, DeprecationWarning, stacklevel=2) if got_keyword_old and got_new: message = (f""{fun.__name__}() got multiple values for "" f""argument now known as `{new_name}`"") raise TypeError(message) kwds[new_name] = kwds.pop(old_name, new_param) return fun(**kwds) return wrapper return decorator ","def _rename_parameter(new_name, old_name, dep_version=None): """""" Generate decorator for backward-compatible keyword renaming Apply the decorator generated by `_rename_parameter` to functions with a recently renamed parameter. After decoration, the function behaves as follows: If only the new parameter is passed into the function, behave as usual. If only the old parameter is passed into the function (as a keyword), raise a DeprecationWarning if `dep_version` is provided, and behave as usual otherwise. If both old and new parameters are passed into the function, raise a DeprecationWarning if `dep_version` is provided, and raise the appropriate TypeError (function got multiple values for argument). Parameters ---------- new_name : str New name of parameter old_name : str Old name of parameter dep_version : str, optional Version of SciPy in which old parameter was deprecated Notes ----- Untested with functions that accept *args. Probably won't work as written. """""" def decorator(fun): @functools.wraps(fun) def wrapper(*args, **kwds): # Check for intersection between positional and keyword args params = list(inspect.signature(fun).parameters) d_args = dict(zip(params, args)) intersection = set(d_args) & set(kwds) if intersection: message = (f""{fun.__name__}() got multiple values "" f""for argument '{list(intersection)[0]}'"") raise TypeError(message) # Consolidate other positional and keyword args into `kwds` kwds.update(d_args) new_param = kwds.get(new_name, None) got_new = new_param is not None got_keyword_old = kwds.get(old_name, None) is not None if got_keyword_old and dep_version: message = (f""Use of keyword argument `{old_name}` is "" f""deprecated and replaced by `{new_name}`. "" f""Support for `{old_name}` will be removed two "" f""feature releases after SciPy {dep_version}."") warnings.warn(message, DeprecationWarning, stacklevel=2) if got_keyword_old and got_new: message = (f""{fun.__name__}() got multiple values for "" f""argument now known as `{new_name}`"") raise TypeError(message) kwds[new_name] = kwds.pop(old_name, new_param) return fun(**kwds) return wrapper return decorator " 47052,"def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith("".json""): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f""Output directory ({training_args.output_dir}) already exists and is not empty."" ""Use --overwrite_output_dir to overcome."" ) # Setup logging logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"" + f""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info(""Training/evaluation parameters %s"", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) else: data_files = {} if data_args.train_file is not None: data_files[""train""] = data_args.train_file if data_args.validation_file is not None: data_files[""validation""] = data_args.validation_file extension = data_args.train_file.split(""."")[-1] datasets = load_dataset(extension, data_files=data_files, field=""data"") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool("".ckpt"" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( ""This example script only works for models that have a fast tokenizer. Checkout the big table of models "" ""at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "" ""requirement"" ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if training_args.do_train: column_names = datasets[""train""].column_names else: column_names = datasets[""validation""].column_names question_column_name = ""question"" if ""question"" in column_names else column_names[0] context_column_name = ""context"" if ""context"" in column_names else column_names[1] answer_column_name = ""answers"" if ""answers"" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). pad_on_right = tokenizer.padding_side == ""right"" # Training preprocessing def prepare_train_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=data_args.max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=""max_length"" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop(""offset_mapping"") # Let's label those examples! tokenized_examples[""start_positions""] = [] tokenized_examples[""end_positions""] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples[""input_ids""][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what is the question). 
sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers[""answer_start""]) == 0: tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers[""answer_start""][0] end_char = start_char + len(answers[""text""][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples[""start_positions""].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples[""end_positions""].append(token_end_index + 1) return tokenized_examples if training_args.do_train: train_dataset = datasets[""train""].map( prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Validation preprocessing def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=data_args.max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=""max_length"" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples[""example_id""] = [] for i in range(len(tokenized_examples[""input_ids""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). 
sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples[""example_id""].append(examples[""id""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples[""offset_mapping""][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples[""offset_mapping""][i]) ] return tokenized_examples if training_args.do_eval: validation_dataset = datasets[""validation""].map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data # collator. data_collator = ( default_data_collator if data_args.pad_to_max_length else DataCollatorWithPadding( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, ) ) # Post-processing: def post_processing_function(examples, features, predictions): # Post-processing: we match the start logits and end logits to answers in the original context. predictions = postprocess_qa_predictions( examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, is_world_process_zero=trainer.is_world_process_zero(), ) # Format the result to the format the metric expects. 
if data_args.version_2_with_negative: formatted_predictions = [ {""id"": k, ""prediction_text"": v, ""no_answer_probability"": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{""id"": k, ""prediction_text"": v} for k, v in predictions.items()] references = [{""id"": ex[""id""], ""answers"": ex[answer_column_name]} for ex in datasets[""validation""]] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = load_metric(""squad_v2"" if data_args.version_2_with_negative else ""squad"") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Initialize our Trainer trainer = QuestionAnsweringTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=validation_dataset if training_args.do_eval else None, eval_examples=datasets[""validation""] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, post_process_function=post_processing_function, compute_metrics=compute_metrics, ) # Training if training_args.do_train: train_result = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, ""train_results.txt"") if trainer.is_world_process_zero(): with open(output_train_file, ""w"") as writer: logger.info(""***** Train results *****"") for key, value in sorted(train_result.metrics.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, ""trainer_state.json"")) # Evaluation results = {} if training_args.do_eval: logger.info(""*** Evaluate ***"") results = trainer.evaluate() output_eval_file = os.path.join(training_args.output_dir, ""eval_results.txt"") if trainer.is_world_process_zero(): with open(output_eval_file, ""w"") as writer: logger.info(""***** Eval results *****"") for key, value in sorted(results.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") return results ","def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith("".json""): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f""Output directory ({training_args.output_dir}) already exists and is not empty."" ""Use --overwrite_output_dir to overcome."" ) # Setup logging logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"" + f""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info(""Training/evaluation parameters %s"", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) else: data_files = {} if data_args.train_file is not None: data_files[""train""] = data_args.train_file if data_args.validation_file is not None: data_files[""validation""] = data_args.validation_file extension = data_args.train_file.split(""."")[-1] datasets = load_dataset(extension, data_files=data_files, field=""data"") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool("".ckpt"" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( ""This example script only works for models that have a fast tokenizer. Checkout the big table of models "" ""at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "" ""requirement"" ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if training_args.do_train: column_names = datasets[""train""].column_names else: column_names = datasets[""validation""].column_names question_column_name = ""question"" if ""question"" in column_names else column_names[0] context_column_name = ""context"" if ""context"" in column_names else column_names[1] answer_column_name = ""answers"" if ""answers"" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). pad_on_right = tokenizer.padding_side == ""right"" # Training preprocessing def prepare_train_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=data_args.max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=""max_length"" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop(""offset_mapping"") # Let's label those examples! tokenized_examples[""start_positions""] = [] tokenized_examples[""end_positions""] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples[""input_ids""][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what is the question). 
sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers[""answer_start""]) == 0: tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers[""answer_start""][0] end_char = start_char + len(answers[""text""][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples[""start_positions""].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples[""end_positions""].append(token_end_index + 1) return tokenized_examples if training_args.do_train: train_dataset = datasets[""train""].map( prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Validation preprocessing def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=data_args.max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=""max_length"" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples[""example_id""] = [] for i in range(len(tokenized_examples[""input_ids""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). 
sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples[""example_id""].append(examples[""id""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples[""offset_mapping""][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples[""offset_mapping""][i]) ] return tokenized_examples if training_args.do_eval: validation_dataset = datasets[""validation""].map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data # collator. data_collator = ( default_data_collator if data_args.pad_to_max_length else DataCollatorWithPadding( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, ) ) # Post-processing: def post_processing_function(examples, features, predictions): # Post-processing: we match the start logits and end logits to answers in the original context. predictions = postprocess_qa_predictions( examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, is_world_process_zero=trainer.is_world_process_zero(), ) # Format the result to the format the metric expects. 
if data_args.version_2_with_negative: formatted_predictions = [ {""id"": k, ""prediction_text"": v, ""no_answer_probability"": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{""id"": k, ""prediction_text"": v} for k, v in predictions.items()] references = [{""id"": ex[""id""], ""answers"": ex[answer_column_name]} for ex in datasets[""validation""]] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = load_metric(""squad_v2"" if data_args.version_2_with_negative else ""squad"") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Initialize our Trainer trainer = QuestionAnsweringTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=validation_dataset if training_args.do_eval else None, eval_examples=datasets[""validation""] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, post_process_function=post_processing_function, compute_metrics=compute_metrics, ) # Training if training_args.do_train: train_result = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, ""train_results.txt"") if trainer.is_world_process_zero(): with open(output_train_file, ""w"") as writer: logger.info(""***** Train results *****"") for key, value in sorted(train_result.metrics.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, ""trainer_state.json"")) # Evaluation results = {} if training_args.do_eval: logger.info(""*** Evaluate ***"") results = trainer.evaluate() output_eval_file = os.path.join(training_args.output_dir, ""eval_results.txt"") if trainer.is_world_process_zero(): with open(output_eval_file, ""w"") as writer: logger.info(""***** Eval results *****"") for key, value in sorted(results.items()): logger.info(f"" {key} = {value}"") writer.write(f""{key} = {value}\n"") return results " 31440,"def close_benign_command(client: Client, args: dict): alert_ids = args.get('alert_ids') custom_filter = args.get('custom_filter') comment = args.get('comment') reason = CLOSE_BENIGN_REASON_OPTIONS.get(str(args.get('reason'))) sendFeedback = bool(args.get('sendFeedback')) feedbackText = args.get('feedbackText') allowContact = bool(args.get('allowContact')) contactEmail = args.get('contactEmail') request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason, sendFeedback, feedbackText, allowContact, contactEmail) close_benign_alerts = client.close_benign(request_data) number_of_close_benign = close_benign_alerts[""close_benign""] return CommandResults( readable_output=f'{number_of_close_benign} alerts are classified as close benign', outputs_prefix='MicrosoftCloudAppSecurity.Alerts', outputs_key_field='_id', outputs=close_benign_alerts) ","def close_benign_command(client: Client, args: dict): alert_ids = args.get('alert_ids') custom_filter = args.get('custom_filter') comment = args.get('comment') reason = CLOSE_BENIGN_REASON_OPTIONS.get(str(args.get('reason'))) sendFeedback = argToBoolean(args.get('sendFeedback')) feedbackText = args.get('feedbackText') allowContact = bool(args.get('allowContact')) contactEmail = 
args.get('contactEmail') request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason, sendFeedback, feedbackText, allowContact, contactEmail) close_benign_alerts = client.close_benign(request_data) number_of_close_benign = close_benign_alerts[""close_benign""] return CommandResults( readable_output=f'{number_of_close_benign} alerts are classified as close benign', outputs_prefix='MicrosoftCloudAppSecurity.Alerts', outputs_key_field='_id', outputs=close_benign_alerts) " 13228,"def get_reana_badge(record): img_url = 'https://camo.githubusercontent.com/e7778200587ecd095b6d70c66bae6c09b3c813a37402f93894cc48446c9b155e/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f52756e2532306f6e2d5245414e412d666633333336' for file in record.files: if str(file[""key""]) == ""reana.yaml"": return { 'img_url': img_url, 'url': u'https://reana.cern.ch/run?from=url;uri=zenodo.org/{}/files/reana.yaml'.format(record.get('recid')) } for item in record.get('related_identifiers', []): if item['scheme'] == ""url"" and item['identifier'].startswith(""https://reana.io/run""): return { 'img_url': img_url, 'url': item['identifier'] } return {} ","def get_reana_badge(record): img_url = current_app.config['REANA_BADGE_IMG_URL'] for file in record.files: if str(file[""key""]) == ""reana.yaml"": return { 'img_url': img_url, 'url': u'https://reana.cern.ch/run?from=url;uri=zenodo.org/{}/files/reana.yaml'.format(record.get('recid')) } for item in record.get('related_identifiers', []): if item['scheme'] == ""url"" and item['identifier'].startswith(""https://reana.io/run""): return { 'img_url': img_url, 'url': item['identifier'] } return {} " 55957,"def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {} if args.train_file is not None: data_files[""train""] = args.train_file if args.validation_file is not None: data_files[""validation""] = args.validation_file extension = args.train_file.split(""."")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = XLNetConfig.from_pretrained(args.model_name_or_path) tokenizer = XLNetTokenizerFast.from_pretrained(args.model_name_or_path) model = XLNetForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool("".ckpt"" in args.model_name_or_path), config=config ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if args.do_train: column_names = raw_datasets[""train""].column_names elif args.do_eval: column_names = raw_datasets[""validation""].column_names else: column_names = raw_datasets[""test""].column_names question_column_name = ""question"" if ""question"" in column_names else column_names[0] context_column_name = ""context"" if ""context"" in column_names else column_names[1] answer_column_name = ""answers"" if ""answers"" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). pad_on_right = tokenizer.padding_side == ""right"" if args.max_seq_length > tokenizer.model_max_length: logger.warn( f""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"" f""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."" ) max_seq_length = min(args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop(""offset_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # Let's label those examples! 
tokenized_examples[""start_positions""] = [] tokenized_examples[""end_positions""] = [] tokenized_examples[""is_impossible""] = [] tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples[""input_ids""][i] cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others get 1.0. # The cls token gets 1.0 too (for predictions of empty answers). tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers[""answer_start""]) == 0: tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Start/end character index of the answer in the text. start_char = answers[""answer_start""][0] end_char = start_char + len(answers[""text""][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != context_idx: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != context_idx: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). 
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples[""start_positions""].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples[""end_positions""].append(token_end_index + 1) tokenized_examples[""is_impossible""].append(0.0) return tokenized_examples if args.do_train: if ""train"" not in raw_datasets: raise ValueError(""--do_train requires a train dataset"") train_dataset = raw_datasets[""train""] if args.max_train_samples is not None: # We will select sample from whole data if agument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples train_dataset = train_dataset.select(range(args.max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples[""example_id""] = [] # We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label. tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, input_ids in enumerate(tokenized_examples[""input_ids""]): # Find the CLS token in the input ids. cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others 1.0. 
tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples[""example_id""].append(examples[""id""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples[""offset_mapping""][i] = [ (o if sequence_ids[k] == context_idx else None) for k, o in enumerate(tokenized_examples[""offset_mapping""][i]) ] return tokenized_examples if args.do_eval: if ""validation"" not in raw_datasets: raise ValueError(""--do_eval requires a validation dataset"") eval_examples = raw_datasets[""validation""] if args.max_val_samples is not None: # We will select sample from whole data eval_examples = eval_examples.select(range(args.max_val_samples)) # Validation Feature Creation eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_val_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again eval_dataset = eval_dataset.select(range(args.max_val_samples)) if args.do_predict: if ""test"" not in raw_datasets: raise ValueError(""--do_predict requires a test dataset"") test_examples = raw_datasets[""test""] if args.max_test_samples is not None: # We will select sample from whole data test_examples = test_examples.select(range(args.max_test_samples)) # Test Feature Creation test_dataset = test_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_test_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again test_dataset = test_dataset.select(range(args.max_test_samples)) # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f""Sample {index} of the training set: {train_dataset[index]}."") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) if args.do_eval: eval_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) eval_dataloader = DataLoader( eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) if args.do_predict: test_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) test_dataloader = DataLoader( test_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) # Post-processing: def post_processing_function(examples, features, predictions, stage=""eval""): # Post-processing: we match the start logits and end logits to answers in the original context. predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search( examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, start_n_top=model.config.start_n_top, end_n_top=model.config.end_n_top, output_dir=args.output_dir, prefix=stage, ) # Format the result to the format the metric expects. if args.version_2_with_negative: formatted_predictions = [ {""id"": k, ""prediction_text"": v, ""no_answer_probability"": scores_diff_json[k]} for k, v in predictions.items() ] else: formatted_predictions = [{""id"": k, ""prediction_text"": v} for k, v in predictions.items()] references = [{""id"": ex[""id""], ""answers"": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = load_metric(""squad_v2"" if args.version_2_with_negative else ""squad"") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) def create_and_fill_np_array(start_or_end_logits, dataset, max_len): """""" Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor Args: start_or_end_logits(:obj:`tensor`): This is the output predictions of the model. We can only enter either start or end logits. eval_dataset: Evaluation dataset max_len(:obj:`int`): The maximum length of the output tensor. ( See the model.eval() part for more details ) """""" step_size = 0 # create a numpy array and fill it with -100. logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32) # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather for i, output_logit in enumerate(start_or_end_logits): # populate columns # We have to fill it such that we have to take the whole tensor and replace it on the newly created array # And after every iteration we have to change the step_size batch_size = output_logit.shape[0] cols = output_logit.shape[1] logits_concat[step_size : step_size + batch_size, :cols] = output_logit step_size = batch_size return logits_concat # Optimizer # Split weights in two groups, one with weight decay and the other not. 
no_decay = [""bias"", ""LayerNorm.weight""] optimizer_grouped_parameters = [ { ""params"": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], ""weight_decay"": args.weight_decay, }, { ""params"": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], ""weight_decay"": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break if args.do_eval: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs.cpu().numpy())) all_start_top_index.append(accelerator.gather(start_top_index.cpu().numpy())) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs.cpu().numpy())) all_end_top_index.append(accelerator.gather(end_top_index.cpu().numpy())) all_cls_logits.append(accelerator.gather(cls_logits.cpu().numpy())) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, eval_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, eval_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, eval_dataset, max_len) del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index eval_dataset.set_format(type=None, columns=list(eval_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) predictions = post_processing_function(eval_examples, eval_dataset, outputs_numpy) eval_metric = compute_metrics(predictions) logger.info(f""Test metrics: {eval_metric}"") if args.do_predict: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(test_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = 
accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs.cpu().numpy())) all_start_top_index.append(accelerator.gather(start_top_index.cpu().numpy())) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs.cpu().numpy())) all_end_top_index.append(accelerator.gather(end_top_index.cpu().numpy())) all_cls_logits.append(accelerator.gather(cls_logits.cpu().numpy())) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, test_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, test_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, test_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, test_dataset, max_len) del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index test_dataset.set_format(type=None, columns=list(test_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) predictions = post_processing_function(test_examples, test_dataset, outputs_numpy) test_metric = compute_metrics(predictions) logger.info(f""Test metrics: {test_metric}"") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) ","def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. 
if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {} if args.train_file is not None: data_files[""train""] = args.train_file if args.validation_file is not None: data_files[""validation""] = args.validation_file extension = args.train_file.split(""."")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = XLNetConfig.from_pretrained(args.model_name_or_path) tokenizer = XLNetTokenizerFast.from_pretrained(args.model_name_or_path) model = XLNetForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool("".ckpt"" in args.model_name_or_path), config=config ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if args.do_train: column_names = raw_datasets[""train""].column_names elif args.do_eval: column_names = raw_datasets[""validation""].column_names else: column_names = raw_datasets[""test""].column_names question_column_name = ""question"" if ""question"" in column_names else column_names[0] context_column_name = ""context"" if ""context"" in column_names else column_names[1] answer_column_name = ""answers"" if ""answers"" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). pad_on_right = tokenizer.padding_side == ""right"" if args.max_seq_length > tokenizer.model_max_length: logger.warn( f""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"" f""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."" ) max_seq_length = min(args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop(""offset_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # Let's label those examples! 
tokenized_examples[""start_positions""] = [] tokenized_examples[""end_positions""] = [] tokenized_examples[""is_impossible""] = [] tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples[""input_ids""][i] cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others get 1.0. # The cls token gets 1.0 too (for predictions of empty answers). tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers[""answer_start""]) == 0: tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Start/end character index of the answer in the text. start_char = answers[""answer_start""][0] end_char = start_char + len(answers[""text""][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != context_idx: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != context_idx: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples[""start_positions""].append(cls_index) tokenized_examples[""end_positions""].append(cls_index) tokenized_examples[""is_impossible""].append(1.0) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). 
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples[""start_positions""].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples[""end_positions""].append(token_end_index + 1) tokenized_examples[""is_impossible""].append(0.0) return tokenized_examples if args.do_train: if ""train"" not in raw_datasets: raise ValueError(""--do_train requires a train dataset"") train_dataset = raw_datasets[""train""] if args.max_train_samples is not None: # We will select sample from whole data if agument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples train_dataset = train_dataset.select(range(args.max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation=""only_second"" if pad_on_right else ""only_first"", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding=""max_length"", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop(""overflow_to_sample_mapping"") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop(""special_tokens_mask"") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples[""example_id""] = [] # We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label. tokenized_examples[""cls_index""] = [] tokenized_examples[""p_mask""] = [] for i, input_ids in enumerate(tokenized_examples[""input_ids""]): # Find the CLS token in the input ids. cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples[""cls_index""].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples[""token_type_ids""][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others 1.0. 
tokenized_examples[""p_mask""].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples[""example_id""].append(examples[""id""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples[""offset_mapping""][i] = [ (o if sequence_ids[k] == context_idx else None) for k, o in enumerate(tokenized_examples[""offset_mapping""][i]) ] return tokenized_examples if args.do_eval: if ""validation"" not in raw_datasets: raise ValueError(""--do_eval requires a validation dataset"") eval_examples = raw_datasets[""validation""] if args.max_val_samples is not None: # We will select sample from whole data eval_examples = eval_examples.select(range(args.max_val_samples)) # Validation Feature Creation eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_val_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again eval_dataset = eval_dataset.select(range(args.max_val_samples)) if args.do_predict: if ""test"" not in raw_datasets: raise ValueError(""--do_predict requires a test dataset"") test_examples = raw_datasets[""test""] if args.max_test_samples is not None: # We will select sample from whole data test_examples = test_examples.select(range(args.max_test_samples)) # Test Feature Creation test_dataset = test_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, ) if args.max_test_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again test_dataset = test_dataset.select(range(args.max_test_samples)) # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f""Sample {index} of the training set: {train_dataset[index]}."") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) if args.do_eval: eval_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) eval_dataloader = DataLoader( eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) if args.do_predict: test_dataset.set_format(type=""torch"", columns=[""attention_mask"", ""input_ids"", ""token_type_ids""]) test_dataloader = DataLoader( test_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) # Post-processing: def post_processing_function(examples, features, predictions, stage=""eval""): # Post-processing: we match the start logits and end logits to answers in the original context. predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search( examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, start_n_top=model.config.start_n_top, end_n_top=model.config.end_n_top, output_dir=args.output_dir, prefix=stage, ) # Format the result to the format the metric expects. if args.version_2_with_negative: formatted_predictions = [ {""id"": k, ""prediction_text"": v, ""no_answer_probability"": scores_diff_json[k]} for k, v in predictions.items() ] else: formatted_predictions = [{""id"": k, ""prediction_text"": v} for k, v in predictions.items()] references = [{""id"": ex[""id""], ""answers"": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = load_metric(""squad_v2"" if args.version_2_with_negative else ""squad"") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) def create_and_fill_np_array(start_or_end_logits, dataset, max_len): """""" Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor Args: start_or_end_logits(:obj:`tensor`): This is the output predictions of the model. We can only enter either start or end logits. eval_dataset: Evaluation dataset max_len(:obj:`int`): The maximum length of the output tensor. ( See the model.eval() part for more details ) """""" step_size = 0 # create a numpy array and fill it with -100. logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32) # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather for i, output_logit in enumerate(start_or_end_logits): # populate columns # We have to fill it such that we have to take the whole tensor and replace it on the newly created array # And after every iteration we have to change the step_size batch_size = output_logit.shape[0] cols = output_logit.shape[1] logits_concat[step_size : step_size + batch_size, :cols] = output_logit step_size = batch_size return logits_concat # Optimizer # Split weights in two groups, one with weight decay and the other not. 
no_decay = [""bias"", ""LayerNorm.weight""] optimizer_grouped_parameters = [ { ""params"": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], ""weight_decay"": args.weight_decay, }, { ""params"": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], ""weight_decay"": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break if args.do_eval: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy()) all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy()) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy()) all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy()) all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, eval_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, eval_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, eval_dataset, max_len) del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index eval_dataset.set_format(type=None, columns=list(eval_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) predictions = post_processing_function(eval_examples, eval_dataset, outputs_numpy) eval_metric = compute_metrics(predictions) logger.info(f""Test metrics: {eval_metric}"") if args.do_predict: # intialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] for step, batch in enumerate(test_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = 
accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather(start_top_log_probs.cpu().numpy())) all_start_top_index.append(accelerator.gather(start_top_index.cpu().numpy())) all_end_top_log_probs.append(accelerator.gather(end_top_log_probs.cpu().numpy())) all_end_top_index.append(accelerator.gather(end_top_index.cpu().numpy())) all_cls_logits.append(accelerator.gather(cls_logits.cpu().numpy())) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, test_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, test_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, test_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, test_dataset, max_len) del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index test_dataset.set_format(type=None, columns=list(test_dataset.features.keys())) outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits, ) predictions = post_processing_function(test_examples, test_dataset, outputs_numpy) test_metric = compute_metrics(predictions) logger.info(f""Test metrics: {test_metric}"") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) " 58183,"def lookup_events(args: dict): client = aws_session( region=args.get('region'), roleArn=args.get('roleArn'), roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration'), ) data = [] kwargs = { 'LookupAttributes': [{ 'AttributeKey': args.get('attributeKey'), 'AttributeValue': args.get('attributeValue') }] } if args.get('startTime') is not None: kwargs.update({'StartTime': datetime.strptime(args.get('startTime'), # type:ignore ""%Y-%m-%dT%H:%M:%S"")}) if args.get('endTime') is not None: kwargs.update( {'EndTime': datetime.strptime(args.get('endTime'), ""%Y-%m-%dT%H:%M:%S"")}) # type:ignore client.lookup_events(**kwargs) paginator = client.get_paginator('lookup_events') demisto.log(**kwargs) demisto.log(""This is the end"") for response in paginator.paginate(**kwargs): for i, event in enumerate(response['Events']): data.append({ 'EventId': event.get('EventId'), 'EventName': event.get('EventName'), 'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')), 'EventSource': event.get('EventSource'), 'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None, 'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None, 'CloudTrailEvent': event.get('CloudTrailEvent') }) if 'Username' in event: data[i].update({'Username': event['Username']}) ec = {'AWS.CloudTrail.Events(val.EventId == obj.EventId)': data} human_readable = tableToMarkdown('AWS CloudTrail Trails', data) return_outputs(human_readable, ec) ","def lookup_events(args: dict): client = aws_session( 
region=args.get('region'), roleArn=args.get('roleArn'), roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration'), ) data = [] kwargs = { 'LookupAttributes': [{ 'AttributeKey': args.get('attributeKey'), 'AttributeValue': args.get('attributeValue') }] } if args.get('startTime') is not None: kwargs.update({'StartTime': datetime.strptime(args.get('startTime'), # type:ignore ""%Y-%m-%dT%H:%M:%S"")}) if args.get('endTime') is not None: kwargs.update( {'EndTime': datetime.strptime(args.get('endTime'), ""%Y-%m-%dT%H:%M:%S"")}) # type:ignore client.lookup_events(**kwargs) paginator = client.get_paginator('lookup_events') demisto. info(**kwargs) demisto. info(""This is the end"") for response in paginator.paginate(**kwargs): for i, event in enumerate(response['Events']): data.append({ 'EventId': event.get('EventId'), 'EventName': event.get('EventName'), 'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')), 'EventSource': event.get('EventSource'), 'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None, 'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None, 'CloudTrailEvent': event.get('CloudTrailEvent') }) if 'Username' in event: data[i].update({'Username': event['Username']}) ec = {'AWS.CloudTrail.Events(val.EventId == obj.EventId)': data} human_readable = tableToMarkdown('AWS CloudTrail Trails', data) return_outputs(human_readable, ec) " 3847,"def to_pandas_edgelist( G, source=""source"", target=""target"", nodelist=None, dtype=None, order=None, edge_key=None, ): """"""Returns the graph edge list as a Pandas DataFrame. Parameters ---------- G : graph The NetworkX graph used to construct the Pandas DataFrame. source : str or int, optional A valid column name (string or integer) for the source nodes (for the directed case). target : str or int, optional A valid column name (string or integer) for the target nodes (for the directed case). nodelist : list, optional Use only nodes specified in nodelist dtype : dtype, default None Use to create the DataFrame. Data type to force. Only a single dtype is allowed. If None, infer. order : None An unused parameter mistakenly included in the function. This is deprecated and will be removed in NetworkX v3.0. edge_key : str or int or None, optional (default=None) A valid column name (string or integer) for the edge keys (for the multigraph case). If None, edge keys are not stored in the DataFrame. Returns ------- df : Pandas DataFrame Graph edge list Examples -------- >>> G = nx.Graph( ... [ ... (""A"", ""B"", {""cost"": 1, ""weight"": 7}), ... (""C"", ""E"", {""cost"": 9, ""weight"": 10}), ... ] ... 
) >>> df = nx.to_pandas_edgelist(G, nodelist=[""A"", ""C""]) >>> df[[""source"", ""target"", ""cost"", ""weight""]] source target cost weight 0 A B 1 7 1 C E 9 10 >>> G = nx.MultiGraph([('A', 'B', {'cost': 1}), ('A', 'B', {'cost': 9})]) >>> df = nx.to_pandas_edgelist(G, nodelist=['A', 'C'], edge_key='ekey') >>> df[['source', 'target', 'cost', 'ekey']] source target cost ekey 0 A B 1 0 1 A B 9 1 """""" import pandas as pd if nodelist is None: edgelist = G.edges(data=True) else: edgelist = G.edges(nodelist, data=True) source_nodes = [s for s, t, d in edgelist] target_nodes = [t for s, t, d in edgelist] all_attrs = set().union(*(d.keys() for s, t, d in edgelist)) if source in all_attrs: raise nx.NetworkXError(f""Source name '{source}' is an edge attr name"") if target in all_attrs: raise nx.NetworkXError(f""Target name '{target}' is an edge attr name"") nan = float(""nan"") edge_attr = {k: [d.get(k, nan) for s, t, d in edgelist] for k in all_attrs} if G.is_multigraph() and edge_key is not None: if edge_key in all_attrs: raise nx.NetworkXError(f""Edge key name '{edge_key}' is an edge attr name"") edge_keys = [k for s, t, k in G.edges(keys=True)] edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys} else: edgelistdict = {source: source_nodes, target: target_nodes} edgelistdict.update(edge_attr) return pd.DataFrame(edgelistdict, dtype=dtype) ","def to_pandas_edgelist( G, source=""source"", target=""target"", nodelist=None, dtype=None, order=None, edge_key=None, ): """"""Returns the graph edge list as a Pandas DataFrame. Parameters ---------- G : graph The NetworkX graph used to construct the Pandas DataFrame. source : str or int, optional A valid column name (string or integer) for the source nodes (for the directed case). target : str or int, optional A valid column name (string or integer) for the target nodes (for the directed case). nodelist : list, optional Use only nodes specified in nodelist dtype : dtype, default None Use to create the DataFrame. Data type to force. Only a single dtype is allowed. If None, infer. order : None An unused parameter mistakenly included in the function. This is deprecated and will be removed in NetworkX v3.0. edge_key : str or int or None, optional (default=None) A valid column name (string or integer) for the edge keys (for the multigraph case). If None, edge keys are not stored in the DataFrame. Returns ------- df : Pandas DataFrame Graph edge list Examples -------- >>> G = nx.Graph( ... [ ... (""A"", ""B"", {""cost"": 1, ""weight"": 7}), ... (""C"", ""E"", {""cost"": 9, ""weight"": 10}), ... ] ... 
) >>> df = nx.to_pandas_edgelist(G, nodelist=[""A"", ""C""]) >>> df[[""source"", ""target"", ""cost"", ""weight""]] source target cost weight 0 A B 1 7 1 C E 9 10 >>> G = nx.MultiGraph([('A', 'B', {'cost': 1}), ('A', 'B', {'cost': 9})]) >>> df = nx.to_pandas_edgelist(G, nodelist=['A', 'C'], edge_key='ekey') >>> df[['source', 'target', 'cost', 'ekey']] source target cost ekey 0 A B 1 0 1 A B 9 1 """""" import pandas as pd if nodelist is None: edgelist = G.edges(data=True) else: edgelist = G.edges(nodelist, data=True) source_nodes = [s for s, t, d in edgelist] target_nodes = [t for s, t, d in edgelist] all_attrs = set().union(*(d.keys() for s, t, d in edgelist)) if source in all_attrs: raise nx.NetworkXError(f""Source name '{source}' is an edge attr name"") if target in all_attrs: raise nx.NetworkXError(f""Target name '{target}' is an edge attr name"") nan = float(""nan"") edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs} if G.is_multigraph() and edge_key is not None: if edge_key in all_attrs: raise nx.NetworkXError(f""Edge key name '{edge_key}' is an edge attr name"") edge_keys = [k for s, t, k in G.edges(keys=True)] edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys} else: edgelistdict = {source: source_nodes, target: target_nodes} edgelistdict.update(edge_attr) return pd.DataFrame(edgelistdict, dtype=dtype) " 54202,"def train(args): """"""Train with the given args. Args: args (namespace): The program arguments. """""" set_deterministic_pytorch(args) if args.num_encs > 1: args = format_mulenc_args(args) # check cuda availability if not torch.cuda.is_available(): logging.warning('cuda is not available') # get input and output dimension info with open(args.valid_json, 'rb') as f: valid_json = json.load(f)['utts'] utts = list(valid_json.keys()) idim_list = [int(valid_json[utts[0]]['input'][i]['shape'][-1]) for i in range(args.num_encs)] odim = int(valid_json[utts[0]]['output'][0]['shape'][-1]) for i in range(args.num_encs): logging.info('stream{}: input dims : {}'.format(i + 1, idim_list[i])) logging.info('#output dims: ' + str(odim)) # specify attention, CTC, hybrid mode if args.mtlalpha == 1.0: mtl_mode = 'ctc' logging.info('Pure CTC mode') elif args.mtlalpha == 0.0: mtl_mode = 'att' logging.info('Pure attention mode') else: mtl_mode = 'mtl' logging.info('Multitask learning mode') if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1: model = load_trained_modules(idim_list[0], odim, args) else: model_class = dynamic_import(args.model_module) model = model_class(idim_list[0] if args.num_encs == 1 else idim_list, odim, args) assert isinstance(model, ASRInterface) if args.rnnlm is not None: rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf) rnnlm = lm_pytorch.ClassifierWithState( lm_pytorch.RNNLM( len(args.char_list), rnnlm_args.layer, rnnlm_args.unit)) torch_load(args.rnnlm, rnnlm) model.rnnlm = rnnlm # write model config if not os.path.exists(args.outdir): os.makedirs(args.outdir) model_conf = args.outdir + '/model.json' with open(model_conf, 'wb') as f: logging.info('writing a model config file to ' + model_conf) f.write(json.dumps((idim_list[0] if args.num_encs == 1 else idim_list, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8')) for key in sorted(vars(args).keys()): logging.info('ARGS: ' + key + ': ' + str(vars(args)[key])) reporter = model.reporter # check the use of multi-gpu if args.ngpu > 1: if args.batch_size != 0: logging.warning('batch size is automatically 
increased (%d -> %d)' % ( args.batch_size, args.batch_size * args.ngpu)) args.batch_size *= args.ngpu if args.num_encs > 1: # TODO(ruizhili): implement data parallel for multi-encoder setup. raise NotImplementedError(""Data parallel is not supported for multi-encoder setup."") # set torch device device = torch.device(""cuda"" if args.ngpu > 0 else ""cpu"") if args.train_dtype in (""float16"", ""float32"", ""float64""): dtype = getattr(torch, args.train_dtype) else: dtype = torch.float32 model = model.to(device=device, dtype=dtype) # Setup an optimizer if args.opt == 'adadelta': optimizer = torch.optim.Adadelta( model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay) elif args.opt == 'adam': optimizer = torch.optim.Adam(model.parameters(), weight_decay=args.weight_decay) elif args.opt == 'noam': from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr) else: raise NotImplementedError(""unknown optimizer: "" + args.opt) # setup apex.amp if args.train_dtype in (""O0"", ""O1"", ""O2"", ""O3""): try: from apex import amp except ImportError as e: logging.error(f""You need to install apex for --train-dtype {args.train_dtype}. "" ""See https://github.com/NVIDIA/apex#linux"") raise e if args.opt == 'noam': model, optimizer.optimizer = amp.initialize(model, optimizer.optimizer, opt_level=args.train_dtype) else: model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype) use_apex = True if args.ctc_type == ""builtin"": from espnet.nets.pytorch_backend.ctc import CTC amp.register_float_function(CTC, ""loss_fn"") amp.init() logging.warning('register pytorch builtin ctc as float function') else: use_apex = False # FIXME: TOO DIRTY HACK setattr(optimizer, ""target"", reporter) setattr(optimizer, ""serialize"", lambda s: reporter.serialize(s)) # Setup a converter if args.num_encs == 1: converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype) else: converter = CustomConverterMulEnc([i[0] for i in model.subsample_list], dtype=dtype) # read json data with open(args.train_json, 'rb') as f: train_json = json.load(f)['utts'] with open(args.valid_json, 'rb') as f: valid_json = json.load(f)['utts'] use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0 # make minibatch list (variable length) train = make_batchset(train_json, args.batch_size, args.maxlen_in, args.maxlen_out, args.minibatches, min_batch_size=args.ngpu if args.ngpu > 1 else 1, shortest_first=use_sortagrad, count=args.batch_count, batch_bins=args.batch_bins, batch_frames_in=args.batch_frames_in, batch_frames_out=args.batch_frames_out, batch_frames_inout=args.batch_frames_inout, iaxis=0, oaxis=0) valid = make_batchset(valid_json, args.batch_size, args.maxlen_in, args.maxlen_out, args.minibatches, min_batch_size=args.ngpu if args.ngpu > 1 else 1, count=args.batch_count, batch_bins=args.batch_bins, batch_frames_in=args.batch_frames_in, batch_frames_out=args.batch_frames_out, batch_frames_inout=args.batch_frames_inout, iaxis=0, oaxis=0) load_tr = LoadInputsAndTargets( mode='asr', load_output=True, preprocess_conf=args.preprocess_conf, preprocess_args={'train': True} # Switch the mode of preprocessing ) load_cv = LoadInputsAndTargets( mode='asr', load_output=True, preprocess_conf=args.preprocess_conf, preprocess_args={'train': False} # Switch the mode of preprocessing ) # hack to make batchsize argument as 1 # actual bathsize is included in a list # default collate function 
converts numpy array to pytorch tensor # we used an empty collate function instead which returns list train_iter = {'main': ChainerDataLoader( dataset=TransformDataset(train, lambda data: converter([load_tr(data)])), batch_size=1, num_workers=args.n_iter_processes, shuffle=not use_sortagrad, collate_fn=lambda x: x[0])} valid_iter = {'main': ChainerDataLoader( dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])), batch_size=1, shuffle=False, collate_fn=lambda x: x[0], num_workers=args.n_iter_processes)} # Set up a trainer updater = CustomUpdater( model, args.grad_clip, train_iter, optimizer, device, args.ngpu, args.grad_noise, args.accum_grad, use_apex=use_apex) trainer = training.Trainer( updater, (args.epochs, 'epoch'), out=args.outdir) if use_sortagrad: trainer.extend(ShufflingEnabler([train_iter]), trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch')) # Resume from a snapshot if args.resume: logging.info('resumed from %s' % args.resume) torch_resume(args.resume, trainer) # Evaluate the model with the test dataset for each epoch if args.save_interval_iters > 0: trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu), trigger=(args.save_interval_iters, 'iteration')) else: trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu)) # Save attention weight each epoch if args.num_save_attention > 0 and args.mtlalpha != 1.0: data = sorted(list(valid_json.items())[:args.num_save_attention], key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True) if hasattr(model, ""module""): att_vis_fn = model.module.calculate_all_attentions plot_class = model.module.attention_plot_class else: att_vis_fn = model.calculate_all_attentions plot_class = model.attention_plot_class att_reporter = plot_class( att_vis_fn, data, args.outdir + ""/att_ws"", converter=converter, transform=load_cv, device=device) trainer.extend(att_reporter, trigger=(1, 'epoch')) else: att_reporter = None # Make a plot for training and validation values if args.num_encs > 1: report_keys_loss_ctc = ['main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [ 'validation/main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)] report_keys_cer_ctc = ['main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [ 'validation/main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)] trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss', 'main/loss_ctc', 'validation/main/loss_ctc', 'main/loss_att', 'validation/main/loss_att'] + ([] if args.num_encs == 1 else report_keys_loss_ctc), 'epoch', file_name='loss.png')) trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc'], 'epoch', file_name='acc.png')) trainer.extend(extensions.PlotReport( ['main/cer_ctc', 'validation/main/cer_ctc'] + ([] if args.num_encs == 1 else report_keys_loss_ctc), 'epoch', file_name='cer.png')) # Save best models trainer.extend(snapshot_object(model, 'model.loss.best'), trigger=training.triggers.MinValueTrigger('validation/main/loss')) if mtl_mode != 'ctc': trainer.extend(snapshot_object(model, 'model.acc.best'), trigger=training.triggers.MaxValueTrigger('validation/main/acc')) # save snapshot which contains model and optimizer states if args.save_interval_iters > 0: trainer.extend(torch_snapshot(filename='snapshot.iter.{.updater.iteration}'), trigger=(args.save_interval_iters, 'iteration')) else: trainer.extend(torch_snapshot(), trigger=(1, 'epoch')) # epsilon decay in the optimizer if args.opt == 'adadelta': if 
args.criterion == 'acc' and mtl_mode != 'ctc': trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load), trigger=CompareValueTrigger( 'validation/main/acc', lambda best_value, current_value: best_value > current_value)) trainer.extend(adadelta_eps_decay(args.eps_decay), trigger=CompareValueTrigger( 'validation/main/acc', lambda best_value, current_value: best_value > current_value)) elif args.criterion == 'loss': trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load), trigger=CompareValueTrigger( 'validation/main/loss', lambda best_value, current_value: best_value < current_value)) trainer.extend(adadelta_eps_decay(args.eps_decay), trigger=CompareValueTrigger( 'validation/main/loss', lambda best_value, current_value: best_value < current_value)) # Write a log of evaluation statistics for each epoch trainer.extend(extensions.LogReport(trigger=(args.report_interval_iters, 'iteration'))) report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att', 'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att', 'main/acc', 'validation/main/acc', 'main/cer_ctc', 'validation/main/cer_ctc', 'elapsed_time'] + ([] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc) if args.opt == 'adadelta': trainer.extend(extensions.observe_value( 'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0][""eps""]), trigger=(args.report_interval_iters, 'iteration')) report_keys.append('eps') if args.report_cer: report_keys.append('validation/main/cer') if args.report_wer: report_keys.append('validation/main/wer') trainer.extend(extensions.PrintReport( report_keys), trigger=(args.report_interval_iters, 'iteration')) trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters)) set_early_stop(trainer, args) if args.tensorboard_dir is not None and args.tensorboard_dir != """": trainer.extend(TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter), trigger=(args.report_interval_iters, ""iteration"")) # Run the training trainer.run() check_early_stop(trainer, args.epochs) ","def train(args): """"""Train with the given args. Args: args (namespace): The program arguments. 
"""""" set_deterministic_pytorch(args) if args.num_encs > 1: args = format_mulenc_args(args) # check cuda availability if not torch.cuda.is_available(): logging.warning('cuda is not available') # get input and output dimension info with open(args.valid_json, 'rb') as f: valid_json = json.load(f)['utts'] utts = list(valid_json.keys()) idim_list = [int(valid_json[utts[0]]['input'][i]['shape'][-1]) for i in range(args.num_encs)] odim = int(valid_json[utts[0]]['output'][0]['shape'][-1]) for i in range(args.num_encs): logging.info('stream{}: input dims : {}'.format(i + 1, idim_list[i])) logging.info('#output dims: ' + str(odim)) # specify attention, CTC, hybrid mode if args.mtlalpha == 1.0: mtl_mode = 'ctc' logging.info('Pure CTC mode') elif args.mtlalpha == 0.0: mtl_mode = 'att' logging.info('Pure attention mode') else: mtl_mode = 'mtl' logging.info('Multitask learning mode') if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1: model = load_trained_modules(idim_list[0], odim, args) else: model_class = dynamic_import(args.model_module) model = model_class(idim_list[0] if args.num_encs == 1 else idim_list, odim, args) assert isinstance(model, ASRInterface) if args.rnnlm is not None: rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf) rnnlm = lm_pytorch.ClassifierWithState( lm_pytorch.RNNLM( len(args.char_list), rnnlm_args.layer, rnnlm_args.unit)) torch_load(args.rnnlm, rnnlm) model.rnnlm = rnnlm # write model config if not os.path.exists(args.outdir): os.makedirs(args.outdir) model_conf = args.outdir + '/model.json' with open(model_conf, 'wb') as f: logging.info('writing a model config file to ' + model_conf) f.write(json.dumps((idim_list[0] if args.num_encs == 1 else idim_list, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8')) for key in sorted(vars(args).keys()): logging.info('ARGS: ' + key + ': ' + str(vars(args)[key])) reporter = model.reporter # check the use of multi-gpu if args.ngpu > 1: if args.batch_size != 0: logging.warning('batch size is automatically increased (%d -> %d)' % ( args.batch_size, args.batch_size * args.ngpu)) args.batch_size *= args.ngpu if args.num_encs > 1: # TODO(ruizhili): implement data parallel for multi-encoder setup. raise NotImplementedError(""Data parallel is not supported for multi-encoder setup."") # set torch device device = torch.device(""cuda"" if args.ngpu > 0 else ""cpu"") if args.train_dtype in (""float16"", ""float32"", ""float64""): dtype = getattr(torch, args.train_dtype) else: dtype = torch.float32 model = model.to(device=device, dtype=dtype) # Setup an optimizer if args.opt == 'adadelta': optimizer = torch.optim.Adadelta( model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay) elif args.opt == 'adam': optimizer = torch.optim.Adam(model.parameters(), weight_decay=args.weight_decay) elif args.opt == 'noam': from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr) else: raise NotImplementedError(""unknown optimizer: "" + args.opt) # setup apex.amp if args.train_dtype in (""O0"", ""O1"", ""O2"", ""O3""): try: from apex import amp except ImportError as e: logging.error(f""You need to install apex for --train-dtype {args.train_dtype}. 
"" ""See https://github.com/NVIDIA/apex#linux"") raise e if args.opt == 'noam': model, optimizer.optimizer = amp.initialize(model, optimizer.optimizer, opt_level=args.train_dtype) else: model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype) use_apex = True from espnet.nets.pytorch_backend.ctc import CTC amp.register_float_function(CTC, ""loss_fn"") amp.init() logging.warning('register ctc as float function') else: use_apex = False # FIXME: TOO DIRTY HACK setattr(optimizer, ""target"", reporter) setattr(optimizer, ""serialize"", lambda s: reporter.serialize(s)) # Setup a converter if args.num_encs == 1: converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype) else: converter = CustomConverterMulEnc([i[0] for i in model.subsample_list], dtype=dtype) # read json data with open(args.train_json, 'rb') as f: train_json = json.load(f)['utts'] with open(args.valid_json, 'rb') as f: valid_json = json.load(f)['utts'] use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0 # make minibatch list (variable length) train = make_batchset(train_json, args.batch_size, args.maxlen_in, args.maxlen_out, args.minibatches, min_batch_size=args.ngpu if args.ngpu > 1 else 1, shortest_first=use_sortagrad, count=args.batch_count, batch_bins=args.batch_bins, batch_frames_in=args.batch_frames_in, batch_frames_out=args.batch_frames_out, batch_frames_inout=args.batch_frames_inout, iaxis=0, oaxis=0) valid = make_batchset(valid_json, args.batch_size, args.maxlen_in, args.maxlen_out, args.minibatches, min_batch_size=args.ngpu if args.ngpu > 1 else 1, count=args.batch_count, batch_bins=args.batch_bins, batch_frames_in=args.batch_frames_in, batch_frames_out=args.batch_frames_out, batch_frames_inout=args.batch_frames_inout, iaxis=0, oaxis=0) load_tr = LoadInputsAndTargets( mode='asr', load_output=True, preprocess_conf=args.preprocess_conf, preprocess_args={'train': True} # Switch the mode of preprocessing ) load_cv = LoadInputsAndTargets( mode='asr', load_output=True, preprocess_conf=args.preprocess_conf, preprocess_args={'train': False} # Switch the mode of preprocessing ) # hack to make batchsize argument as 1 # actual bathsize is included in a list # default collate function converts numpy array to pytorch tensor # we used an empty collate function instead which returns list train_iter = {'main': ChainerDataLoader( dataset=TransformDataset(train, lambda data: converter([load_tr(data)])), batch_size=1, num_workers=args.n_iter_processes, shuffle=not use_sortagrad, collate_fn=lambda x: x[0])} valid_iter = {'main': ChainerDataLoader( dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])), batch_size=1, shuffle=False, collate_fn=lambda x: x[0], num_workers=args.n_iter_processes)} # Set up a trainer updater = CustomUpdater( model, args.grad_clip, train_iter, optimizer, device, args.ngpu, args.grad_noise, args.accum_grad, use_apex=use_apex) trainer = training.Trainer( updater, (args.epochs, 'epoch'), out=args.outdir) if use_sortagrad: trainer.extend(ShufflingEnabler([train_iter]), trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch')) # Resume from a snapshot if args.resume: logging.info('resumed from %s' % args.resume) torch_resume(args.resume, trainer) # Evaluate the model with the test dataset for each epoch if args.save_interval_iters > 0: trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu), trigger=(args.save_interval_iters, 'iteration')) else: trainer.extend(CustomEvaluator(model, valid_iter, reporter, 
device, args.ngpu)) # Save attention weight each epoch if args.num_save_attention > 0 and args.mtlalpha != 1.0: data = sorted(list(valid_json.items())[:args.num_save_attention], key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True) if hasattr(model, ""module""): att_vis_fn = model.module.calculate_all_attentions plot_class = model.module.attention_plot_class else: att_vis_fn = model.calculate_all_attentions plot_class = model.attention_plot_class att_reporter = plot_class( att_vis_fn, data, args.outdir + ""/att_ws"", converter=converter, transform=load_cv, device=device) trainer.extend(att_reporter, trigger=(1, 'epoch')) else: att_reporter = None # Make a plot for training and validation values if args.num_encs > 1: report_keys_loss_ctc = ['main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [ 'validation/main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)] report_keys_cer_ctc = ['main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [ 'validation/main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)] trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss', 'main/loss_ctc', 'validation/main/loss_ctc', 'main/loss_att', 'validation/main/loss_att'] + ([] if args.num_encs == 1 else report_keys_loss_ctc), 'epoch', file_name='loss.png')) trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc'], 'epoch', file_name='acc.png')) trainer.extend(extensions.PlotReport( ['main/cer_ctc', 'validation/main/cer_ctc'] + ([] if args.num_encs == 1 else report_keys_loss_ctc), 'epoch', file_name='cer.png')) # Save best models trainer.extend(snapshot_object(model, 'model.loss.best'), trigger=training.triggers.MinValueTrigger('validation/main/loss')) if mtl_mode != 'ctc': trainer.extend(snapshot_object(model, 'model.acc.best'), trigger=training.triggers.MaxValueTrigger('validation/main/acc')) # save snapshot which contains model and optimizer states if args.save_interval_iters > 0: trainer.extend(torch_snapshot(filename='snapshot.iter.{.updater.iteration}'), trigger=(args.save_interval_iters, 'iteration')) else: trainer.extend(torch_snapshot(), trigger=(1, 'epoch')) # epsilon decay in the optimizer if args.opt == 'adadelta': if args.criterion == 'acc' and mtl_mode != 'ctc': trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load), trigger=CompareValueTrigger( 'validation/main/acc', lambda best_value, current_value: best_value > current_value)) trainer.extend(adadelta_eps_decay(args.eps_decay), trigger=CompareValueTrigger( 'validation/main/acc', lambda best_value, current_value: best_value > current_value)) elif args.criterion == 'loss': trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load), trigger=CompareValueTrigger( 'validation/main/loss', lambda best_value, current_value: best_value < current_value)) trainer.extend(adadelta_eps_decay(args.eps_decay), trigger=CompareValueTrigger( 'validation/main/loss', lambda best_value, current_value: best_value < current_value)) # Write a log of evaluation statistics for each epoch trainer.extend(extensions.LogReport(trigger=(args.report_interval_iters, 'iteration'))) report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att', 'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att', 'main/acc', 'validation/main/acc', 'main/cer_ctc', 'validation/main/cer_ctc', 'elapsed_time'] + ([] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc) if args.opt == 
'adadelta': trainer.extend(extensions.observe_value( 'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0][""eps""]), trigger=(args.report_interval_iters, 'iteration')) report_keys.append('eps') if args.report_cer: report_keys.append('validation/main/cer') if args.report_wer: report_keys.append('validation/main/wer') trainer.extend(extensions.PrintReport( report_keys), trigger=(args.report_interval_iters, 'iteration')) trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters)) set_early_stop(trainer, args) if args.tensorboard_dir is not None and args.tensorboard_dir != """": trainer.extend(TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter), trigger=(args.report_interval_iters, ""iteration"")) # Run the training trainer.run() check_early_stop(trainer, args.epochs) " 1745,"def top_k_accuracy_score(y_true, y_score, k=5, normalize=True): """"""Top k Accuracy classification score. This metric computes the number of times where the correct label is among the top ``k`` labels predicted (ranked by predicted scores). Note that multilabel classification case isn't handled here. Parameters ---------- y_true : array-like of shape (n_samples,) True labels. y_score : array-like of shape (n_samples, n_classes) Target scores. k : int, optional (default=5) Number of guesses allowed to find the correct label. normalize : bool, optional (default=True) If ``True``, return the fraction of correctly classified samples. Otherwise, return the number of correctly classified samples. Returns ------- score : float The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- accuracy_score Notes ----- If ``k = 1``, the result will be the same as the accuracy_score (though see note below). If ``k`` is the same as the number of classes, this score will be perfect and meaningless. In cases where two or more labels are assigned equal probabilities, the result may be incorrect if one of those labels falls at the threshold, as one class must be chosen to be the ``k``th class and the class chosen may not be the correct one. Examples -------- >>> import numpy as np >>> from sklearn.metrics import top_k_accuracy_score >>> y_true = np.array([0, 1, 2, 2]) >>> y_score = np.array([[0.5, 0.2, 0.2], ... [0.3, 0.4, 0.2], ... [0.2, 0.4, 0.3], ... [0.7, 0.2, 0.1]]) >>> top_k_accuracy_score(y_true, y_score, k=1) 0.5 >>> top_k_accuracy_score(y_true, y_score, k=2) 0.75 >>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False) 3 >>> top_k_accuracy_score(y_true, y_score, k=3) 1.0 """""" check_consistent_length(y_true, y_score) y_type = type_of_target(y_true) if y_type != 'multiclass': raise ValueError(f""Target type must be 'multiclass' not {y_type}"") y_true = column_or_1d(y_true) y_score = check_array(y_score) classes = _encode(y_true) if len(classes) != y_score.shape[1]: raise ValueError( ""Number of classes in y_true not equal to the number of columns "" ""in 'y_score'"" ) sorted_pred = np.argsort(-y_score, axis=1) score = sum(y in pred[:k] for y, pred in zip(y_true, sorted_pred)) score = score / len(y_true) if normalize else score return score ","def top_k_accuracy_score(y_true, y_score, k=5, normalize=True): """"""Top k Accuracy classification score. This metric computes the number of times where the correct label is among the top ``k`` labels predicted (ranked by predicted scores). Note that multilabel classification case isn't handled here. 
Parameters ---------- y_true : array-like of shape (n_samples,) True labels. y_score : array-like of shape (n_samples, n_classes) Target scores. k : int, optional (default=5) Number of guesses allowed to find the correct label. normalize : bool, optional (default=True) If ``True``, return the fraction of correctly classified samples. Otherwise, return the number of correctly classified samples. Returns ------- score : float The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- accuracy_score Notes ----- If ``k = 1``, the result will be the same as the accuracy_score (though see note below). If ``k`` is the same as the number of classes, this score will be perfect and meaningless. In cases where two or more labels are assigned equal probabilities, the result may be incorrect if one of those labels falls at the threshold, as one class must be chosen to be the ``k``th class and the class chosen may not be the correct one. Examples -------- >>> import numpy as np >>> from sklearn.metrics import top_k_accuracy_score >>> y_true = np.array([0, 1, 2, 2]) >>> y_score = np.array([[0.5, 0.2, 0.2], ... [0.3, 0.4, 0.2], ... [0.2, 0.4, 0.3], ... [0.7, 0.2, 0.1]]) >>> top_k_accuracy_score(y_true, y_score, k=1) 0.5 >>> top_k_accuracy_score(y_true, y_score, k=2) 0.75 >>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False) 3 >>> top_k_accuracy_score(y_true, y_score, k=3) 1.0 """""" check_consistent_length(y_true, y_score) y_type = type_of_target(y_true) if y_type != 'multiclass': raise ValueError(f""Target type must be 'multiclass', got {y_type} instead"") y_true = column_or_1d(y_true) y_score = check_array(y_score) classes = _encode(y_true) if len(classes) != y_score.shape[1]: raise ValueError( ""Number of classes in y_true not equal to the number of columns "" ""in 'y_score'"" ) sorted_pred = np.argsort(-y_score, axis=1) score = sum(y in pred[:k] for y, pred in zip(y_true, sorted_pred)) score = score / len(y_true) if normalize else score return score " 33002,"def dot_scene_node_export( ob, path, doc=None, rex=None, exported_meshes=[], meshes=[], mesh_collision_prims={}, mesh_collision_files={}, prefix='', objects=[], xmlparent=None ): o = _ogre_node_helper( doc, ob ) xmlparent.appendChild(o) if config.get('EXPORT_USER'): # Custom user props if len(ob.items()) + len(ob.game.properties) > 0: user = doc.createElement('userData') o.appendChild(user) for prop in ob.items(): propname, propvalue = prop if not propname.startswith('_'): _property_helper(doc, user, propname, propvalue) # Custom user props from BGE props by Mind Calamity for prop in ob.game.properties: _property_helper(doc, user, prop.name, prop.value) # BGE subset if len(ob.game.sensors) + len(ob.game.actuators) > 0: game = doc.createElement('game') o.appendChild( game ) sens = doc.createElement('sensors') game.appendChild( sens ) acts = doc.createElement('actuators') game.appendChild( acts ) for sen in ob.game.sensors: sens.appendChild( WrapSensor(sen).xml(doc) ) for act in ob.game.actuators: acts.appendChild( WrapActuator(act).xml(doc) ) if ob.type == 'MESH': # ob.data.tessfaces is empty. 
always until the following call ob.data.update(calc_tessface=True) # if it has no faces at all, the object itself will not be exported, BUT # it might have children if ob.type == 'MESH' and len(ob.data.tessfaces): collisionFile = None collisionPrim = None if ob.data.name in mesh_collision_prims: collisionPrim = mesh_collision_prims[ ob.data.name ] if ob.data.name in mesh_collision_files: collisionFile = mesh_collision_files[ ob.data.name ] # Print a warning if there are no UV Maps created for the object # and the user requested to have tangents generated # (they won't be without a UV Map) if int(config.get(""GENERATE_TANGENTS"")) != 0 and len(ob.data.uv_layers) == 0: logger.warning(""No UV Maps were created for this object: <%s>, tangents won't be exported."" % ob.name) Report.warnings.append( 'Object ""%s"" has no UV Maps, tangents won\'t be exported.' % ob.name ) e = doc.createElement('entity') o.appendChild(e); e.setAttribute('name', ob.name) prefix = '' e.setAttribute('meshFile', '%s%s.mesh' % (prefix, clean_object_name(ob.data.name)) ) if not collisionPrim and not collisionFile: if ob.game.use_collision_bounds: collisionPrim = ob.game.collision_bounds_type.lower() mesh_collision_prims[ ob.data.name ] = collisionPrim else: for child in ob.children: if child.subcollision and child.name.startswith('DECIMATE'): collisionFile = '%s_collision_%s.mesh' % (prefix, ob.data.name) break if collisionFile: mesh_collision_files[ ob.data.name ] = collisionFile mesh.dot_mesh(child, path, force_name='%s_collision_%s' % (prefix, ob.data.name) ) skeleton.dot_skeleton(child, path) if collisionPrim: e.setAttribute('collisionPrim', collisionPrim ) elif collisionFile: e.setAttribute('collisionFile', collisionFile ) if config.get('EXPORT_USER'): _mesh_entity_helper( doc, ob, e ) # export mesh.xml file of this object if config.get('MESH') and ob.data.name not in exported_meshes: exists = os.path.isfile( join( path, '%s.mesh' % ob.data.name ) ) overwrite = not exists or (exists and config.get(""MESH_OVERWRITE"")) tangents = int(config.get(""GENERATE_TANGENTS"")) mesh.dot_mesh(ob, path, overwrite=overwrite, tangents=tangents) skeleton.dot_skeleton(ob, path, overwrite=overwrite) exported_meshes.append( ob.data.name ) # Deal with Array modifier vecs = [ ob.matrix_world.to_translation() ] for mod in ob.modifiers: if mod.type == 'ARRAY': if mod.fit_type != 'FIXED_COUNT': logger.warning(""<%s> Unsupported array-modifier type: %s, only 'Fixed Count' is supported"" % (ob.name, mod.fit_type)) Report.warnings.append(""Object \""%s\"" has unsupported array-modifier type: %s, only 'Fixed Count' is supported"" % (ob.name, mod.fit_type)) continue if not mod.use_constant_offset: logger.warning(""<%s> Unsupported array-modifier mode, must be of 'Constant Offset' type"" % ob.name) Report.warnings.append(""Object \""%s\"" has unsupported array-modifier mode, must be of 'Constant Offset' type"" % ob.name) continue else: #v = ob.matrix_world.to_translation() newvecs = [] for prev in vecs: for i in range( mod.count-1 ): v = prev + mod.constant_offset_displace newvecs.append( v ) ao = _ogre_node_helper( doc, ob, prefix='_array_%s_'%len(vecs+newvecs), pos=v ) xmlparent.appendChild(ao) e = doc.createElement('entity') ao.appendChild(e); e.setAttribute('name', ob.data.name) e.setAttribute('meshFile', '%s.mesh' % clean_object_name(ob.data.name)) if collisionPrim: e.setAttribute('collisionPrim', collisionPrim ) elif collisionFile: e.setAttribute('collisionFile', collisionFile ) vecs += newvecs # Deal with Particle Systems y_rot = 
mathutils.Quaternion((0.0, 1.0, 0.0), math.radians(90.0)) for partsys in ob.particle_systems: if partsys.settings.type == 'HAIR' and partsys.settings.render_type == 'OBJECT': index = 0 for particle in partsys.particles: dupob = partsys.settings.dupli_object ao = _ogre_node_helper( doc, dupob, prefix='%s_particle_%s_' % (clean_object_name(ob.data.name), index), pos=particle.hair_keys[0].co, rot=(particle.rotation * y_rot), scl=(dupob.scale * particle.size) ) o.appendChild(ao) e = doc.createElement('entity') ao.appendChild(e); e.setAttribute('name', ('%s_particle_%s_%s' % (clean_object_name(ob.data.name), index, clean_object_name(dupob.data.name)))) e.setAttribute('meshFile', '%s.mesh' % clean_object_name(dupob.data.name)) index += 1 else: logger.warn(""<%s> Particle System %s is not supported for export (should be of type: 'Hair' and render_type: 'Object')"" % (ob.name, partsys.name)) Report.warnings.append(""Object \""%s\"" has Particle System: \""%s\"" not supported for export (should be of type: 'Hair' and render_type: 'Object')"" % (ob.name, partsys.name)) elif ob.type == 'CAMERA': Report.cameras.append( ob.name ) c = doc.createElement('camera') o.appendChild(c); c.setAttribute('name', ob.data.name) aspx = bpy.context.scene.render.pixel_aspect_x aspy = bpy.context.scene.render.pixel_aspect_y sx = bpy.context.scene.render.resolution_x sy = bpy.context.scene.render.resolution_y if ob.data.type == ""PERSP"": fovY = 0.0 if (sx*aspx > sy*aspy): fovY = 2*math.atan(sy*aspy*16.0/(ob.data.lens*sx*aspx)) else: fovY = 2*math.atan(16.0/ob.data.lens) # fov in radians - like OgreMax - requested by cyrfer fov = math.radians( fovY*180.0/math.pi ) c.setAttribute('projectionType', ""perspective"") c.setAttribute('fov', '%6f' % fov) else: # ob.data.type == ""ORTHO"": c.setAttribute('projectionType', ""orthographic"") c.setAttribute('orthoScale', '%6f' % ob.data.ortho_scale) a = doc.createElement('clipping'); c.appendChild( a ) a.setAttribute('near', '%6f' % ob.data.clip_start) # requested by cyrfer a.setAttribute('far', '%6f' % ob.data.clip_end) elif ob.type == 'LAMP' and ob.data.type in 'POINT SPOT SUN'.split(): Report.lights.append( ob.name ) l = doc.createElement('light') o.appendChild(l) if ob.data.type == 'POINT': l.setAttribute('type', 'point') elif ob.data.type == 'SPOT': l.setAttribute('type', 'spot') elif ob.data.type == 'SUN': l.setAttribute('type', 'directional') l.setAttribute('name', ob.name ) l.setAttribute('powerScale', str(ob.data.energy)) if ob.data.use_diffuse: a = doc.createElement('colourDiffuse'); l.appendChild( a ) a.setAttribute('r', '%6f' % ob.data.color.r) a.setAttribute('g', '%6f' % ob.data.color.g) a.setAttribute('b', '%6f' % ob.data.color.b) if ob.data.use_specular: a = doc.createElement('colourSpecular'); l.appendChild( a ) a.setAttribute('r', '%6f' % ob.data.color.r) a.setAttribute('g', '%6f' % ob.data.color.g) a.setAttribute('b', '%6f' % ob.data.color.b) if ob.data.type == 'SPOT': a = doc.createElement('lightRange') l.appendChild(a) a.setAttribute('inner',str( ob.data.spot_size*(1.0-ob.data.spot_blend) )) a.setAttribute('outer',str(ob.data.spot_size)) a.setAttribute('falloff','1.0') a = doc.createElement('lightAttenuation'); l.appendChild( a ) a.setAttribute('range', '5000' ) # is this an Ogre constant? 
a.setAttribute('constant', '1.0') # TODO support quadratic light a.setAttribute('linear', '%6f' % (1.0 / ob.data.distance)) a.setAttribute('quadratic', '0.0') # Node Animation if config.get('NODE_ANIMATION'): node_anim.dot_nodeanim(ob, doc, o) for child in ob.children: dot_scene_node_export( child, path, doc = doc, rex = rex, exported_meshes = exported_meshes, meshes = meshes, mesh_collision_prims = mesh_collision_prims, mesh_collision_files = mesh_collision_files, prefix = prefix, objects=objects, xmlparent=o ) ","def dot_scene_node_export( ob, path, doc=None, rex=None, exported_meshes=[], meshes=[], mesh_collision_prims={}, mesh_collision_files={}, prefix='', objects=[], xmlparent=None ): o = _ogre_node_helper( doc, ob ) xmlparent.appendChild(o) if config.get('EXPORT_USER'): # Custom user props if len(ob.items()) + len(ob.game.properties) > 0: user = doc.createElement('userData') o.appendChild(user) for prop in ob.items(): propname, propvalue = prop if not propname.startswith('_'): _property_helper(doc, user, propname, propvalue) # Custom user props from BGE props by Mind Calamity for prop in ob.game.properties: _property_helper(doc, user, prop.name, prop.value) # BGE subset if len(ob.game.sensors) + len(ob.game.actuators) > 0: game = doc.createElement('game') o.appendChild( game ) sens = doc.createElement('sensors') game.appendChild( sens ) acts = doc.createElement('actuators') game.appendChild( acts ) for sen in ob.game.sensors: sens.appendChild( WrapSensor(sen).xml(doc) ) for act in ob.game.actuators: acts.appendChild( WrapActuator(act).xml(doc) ) if ob.type == 'MESH': # ob.data.tessfaces is empty. always until the following call ob.data.update(calc_tessface=True) # if it has no faces at all, the object itself will not be exported, BUT # it might have children if ob.type == 'MESH' and len(ob.data.tessfaces): collisionFile = None collisionPrim = None if ob.data.name in mesh_collision_prims: collisionPrim = mesh_collision_prims[ ob.data.name ] if ob.data.name in mesh_collision_files: collisionFile = mesh_collision_files[ ob.data.name ] # Print a warning if there are no UV Maps created for the object # and the user requested to have tangents generated # (they won't be without a UV Map) if int(config.get(""GENERATE_TANGENTS"")) != 0 and len(ob.data.uv_layers) == 0: logger.warning(""No UV Maps were created for this object: <%s>, tangents won't be exported."" % ob.name) Report.warnings.append( 'Object ""%s"" has no UV Maps, tangents won\'t be exported.' 
% ob.name ) e = doc.createElement('entity') o.appendChild(e); e.setAttribute('name', ob.name) prefix = '' e.setAttribute('meshFile', '%s%s.mesh' % (prefix, clean_object_name(ob.data.name)) ) if not collisionPrim and not collisionFile: if ob.game.use_collision_bounds: collisionPrim = ob.game.collision_bounds_type.lower() mesh_collision_prims[ ob.data.name ] = collisionPrim else: for child in ob.children: if child.subcollision and child.name.startswith('DECIMATE'): collisionFile = '%s_collision_%s.mesh' % (prefix, ob.data.name) break if collisionFile: mesh_collision_files[ ob.data.name ] = collisionFile mesh.dot_mesh(child, path, force_name='%s_collision_%s' % (prefix, ob.data.name) ) skeleton.dot_skeleton(child, path) if collisionPrim: e.setAttribute('collisionPrim', collisionPrim ) elif collisionFile: e.setAttribute('collisionFile', collisionFile ) if config.get('EXPORT_USER'): _mesh_entity_helper( doc, ob, e ) # export mesh.xml file of this object if config.get('MESH') and ob.data.name not in exported_meshes: exists = os.path.isfile( join( path, '%s.mesh' % ob.data.name ) ) overwrite = not exists or (exists and config.get(""MESH_OVERWRITE"")) tangents = int(config.get(""GENERATE_TANGENTS"")) mesh.dot_mesh(ob, path, overwrite=overwrite, tangents=tangents) skeleton.dot_skeleton(ob, path, overwrite=overwrite) exported_meshes.append( ob.data.name ) # Deal with Array modifier vecs = [ ob.matrix_world.to_translation() ] for mod in ob.modifiers: if mod.type == 'ARRAY': if mod.fit_type != 'FIXED_COUNT': logger.warning(""<%s> Unsupported array-modifier type: %s, only 'Fixed Count' is supported"" % (ob.name, mod.fit_type)) Report.warnings.append(""Object \""%s\"" has unsupported array-modifier type: %s, only 'Fixed Count' is supported"" % (ob.name, mod.fit_type)) continue if not mod.use_constant_offset: logger.warning(""<%s> Unsupported array-modifier mode, must be of 'Constant Offset' type"" % ob.name) Report.warnings.append(""Object \""%s\"" has unsupported array-modifier mode, must be of 'Constant Offset' type"" % ob.name) continue else: #v = ob.matrix_world.to_translation() newvecs = [] for prev in vecs: for i in range( mod.count-1 ): v = prev + mod.constant_offset_displace newvecs.append( v ) ao = _ogre_node_helper( doc, ob, prefix='_array_%s_'%len(vecs+newvecs), pos=v ) xmlparent.appendChild(ao) e = doc.createElement('entity') ao.appendChild(e); e.setAttribute('name', ob.data.name) e.setAttribute('meshFile', '%s.mesh' % clean_object_name(ob.data.name)) if collisionPrim: e.setAttribute('collisionPrim', collisionPrim ) elif collisionFile: e.setAttribute('collisionFile', collisionFile ) vecs += newvecs # Deal with Particle Systems y_rot = mathutils.Quaternion((0.0, 1.0, 0.0), math.radians(90.0)) for partsys in ob.particle_systems: if partsys.settings.type == 'HAIR' and partsys.settings.render_type == 'OBJECT': index = 0 for particle in partsys.particles: dupob = partsys.settings.dupli_object ao = _ogre_node_helper( doc, dupob, prefix='%s_particle_%s_' % (clean_object_name(ob.data.name), index), pos=particle.hair_keys[0].co, rot=(particle.rotation * y_rot), scl=(dupob.scale * particle.size) ) o.appendChild(ao) e = doc.createElement('entity') ao.appendChild(e); e.setAttribute('name', ('%s_particle_%s_%s' % (clean_object_name(ob.data.name), index, clean_object_name(dupob.data.name)))) e.setAttribute('meshFile', '%s.mesh' % clean_object_name(dupob.data.name)) index += 1 else: logger.warn(""<%s> Particle System %s is not supported for export (should be of type: 'Hair' and render_type: 'Object')"" % 
(ob.name, partsys.name)) Report.warnings.append(""Object \""%s\"" has Particle System: \""%s\"" not supported for export (should be of type: 'Hair' and render_type: 'Object')"" % (ob.name, partsys.name)) elif ob.type == 'CAMERA': Report.cameras.append( ob.name ) c = doc.createElement('camera') o.appendChild(c); c.setAttribute('name', ob.data.name) aspx = bpy.context.scene.render.pixel_aspect_x aspy = bpy.context.scene.render.pixel_aspect_y sx = bpy.context.scene.render.resolution_x sy = bpy.context.scene.render.resolution_y if ob.data.type == ""PERSP"": fovY = 0.0 if (sx*aspx > sy*aspy): fovY = 2*math.atan(sy*aspy*16.0/(ob.data.lens*sx*aspx)) else: fovY = 2*math.atan(16.0/ob.data.lens) # fov in radians - like OgreMax - requested by cyrfer fov = math.radians( fovY*180.0/math.pi ) c.setAttribute('projectionType', ""perspective"") c.setAttribute('fov', '%6f' % fov) else: # ob.data.type == ""ORTHO"": c.setAttribute('projectionType', ""orthographic"") c.setAttribute('orthoScale', '%6f' % ob.data.ortho_scale) a = doc.createElement('clipping'); c.appendChild( a ) a.setAttribute('near', '%6f' % ob.data.clip_start) # requested by cyrfer a.setAttribute('far', '%6f' % ob.data.clip_end) elif ob.type == 'LAMP' and ob.data.type in 'POINT SPOT SUN'.split(): Report.lights.append( ob.name ) l = doc.createElement('light') o.appendChild(l) if ob.data.type == 'POINT': l.setAttribute('type', 'point') elif ob.data.type == 'SPOT': l.setAttribute('type', 'spot') elif ob.data.type == 'SUN': l.setAttribute('type', 'directional') l.setAttribute('name', ob.name ) l.setAttribute('powerScale', str(ob.data.energy)) if ob.data.use_diffuse: a = doc.createElement('colourDiffuse'); l.appendChild( a ) a.setAttribute('r', '%3f' % ob.data.color.r) a.setAttribute('g', '%6f' % ob.data.color.g) a.setAttribute('b', '%6f' % ob.data.color.b) if ob.data.use_specular: a = doc.createElement('colourSpecular'); l.appendChild( a ) a.setAttribute('r', '%6f' % ob.data.color.r) a.setAttribute('g', '%6f' % ob.data.color.g) a.setAttribute('b', '%6f' % ob.data.color.b) if ob.data.type == 'SPOT': a = doc.createElement('lightRange') l.appendChild(a) a.setAttribute('inner',str( ob.data.spot_size*(1.0-ob.data.spot_blend) )) a.setAttribute('outer',str(ob.data.spot_size)) a.setAttribute('falloff','1.0') a = doc.createElement('lightAttenuation'); l.appendChild( a ) a.setAttribute('range', '5000' ) # is this an Ogre constant? a.setAttribute('constant', '1.0') # TODO support quadratic light a.setAttribute('linear', '%6f' % (1.0 / ob.data.distance)) a.setAttribute('quadratic', '0.0') # Node Animation if config.get('NODE_ANIMATION'): node_anim.dot_nodeanim(ob, doc, o) for child in ob.children: dot_scene_node_export( child, path, doc = doc, rex = rex, exported_meshes = exported_meshes, meshes = meshes, mesh_collision_prims = mesh_collision_prims, mesh_collision_files = mesh_collision_files, prefix = prefix, objects=objects, xmlparent=o ) " 19888,"def macadam_limits(target_brightness, illuminant=()): """""" whavelenght reaches from 360 to 830 nm, in within the programm it is handled as 0 to 470. Beyond the references this programm is very fast, because the possible optimums are not simply tested step by step but more effectively targeted by steps of power of two. The whavelenghts left and right of a rough optimum are fited by a rule of proportion, so that the wished brightness will be reached exactly. 
Parameters ---------- target_brightness : floating point brightness has to be between 0 and 1 illuminant: object illuminant must be out of colorimetry.MSDS_CMFS['XXX'] If there is no illuminant or it has the wrong form, the illuminant SDS_ILLUMINANTS['E'] is choosen wich has no influence to the calculations, because it is an equal-energy-spectrum if necessary a third parameter for the colour-matching funciton could easily be implemented Returns ------- an array of CIE -X,Y,Z - Triples for every single whavelength in single nm - Steps in the range from 360 to 830 nm References ---------- - cite: Wyszecki, G., & Stiles, W. S. (2000). In Color Science: Concepts and Methods, Quantitative Data and Formulae (pp. 181–184). Wiley. ISBN:978-0-471-39918-6 - cite: Francisco Martínez-Verdú, Esther Perales, Elisabet Chorro, Dolores de Fez, Valentín Viqueira, and Eduardo Gilabert, ""Computation and visualization of the MacAdam limits for any lightness, hue angle, and light source,"" J. Opt. Soc. Am. A 24, 1501-1515 (2007) - cite: Kenichiro Masaoka. In OPTICS LETTERS, June 15, 2010 / Vol. 35, No. 1 (pp. 2031 - 2033) Example -------- from matplotlib import pyplot as plt import numpy as np import math fig = plt.figure(figsize=(7,7)) ax = fig.add_axes([0,0,1,1]) illuminant = colour.SDS_ILLUMINANTS['D65'] def plot_Narrowband_Spectra (Yxy_Narrowband_Spectra): FirstColumn = 0 SecondColumn = 1 x = Yxy_Narrowband_Spectra[...,FirstColumn] y = Yxy_Narrowband_Spectra[...,SecondColumn] ax.plot(x,y,'orange',label='Spectrum Loci') x = [Yxy_Narrowband_Spectra[-1][FirstColumn], Yxy_Narrowband_Spectra[0][FirstColumn]] y = [Yxy_Narrowband_Spectra[-1][SecondColumn], Yxy_Narrowband_Spectra[0][SecondColumn]] ax.plot(x,y,'purple',label='Purple Boundary') return() for n in range(1, 20): Yxy_Narrowband_Spectra = colour.XYZ_to_xy( colour.macadam_limits(n/20, illuminant) / 100) plot_Narrowband_Spectra (Yxy_Narrowband_Spectra) plt.show() """""" target_bright = target_brightness if target_bright > 1 or target_bright < 0: raise TypeError('brightness of function macadam_limits( )' 'has to be between 0 and 1') standard_cfms = MSDS_CMFS['CIE 1931 2 Degree Standard Observer'] X_cie31 = standard_cfms.values[..., 0] Y_cie31 = standard_cfms.values[..., 1] Z_cie31 = standard_cfms.values[..., 2] try: illuminant.interpolator except AttributeError: illuminant = SDS_ILLUMINANTS['E'] # If there is no illuminant or it has the wrong form, # an illuminant choosen with no influence # If the illuminanats do not match the format of the Standard Observer, # they have to be adaptet illuminant.extrapolate(SpectralShape(360, 830)) illuminant.interpolate(SpectralShape(360, 830, 1)) # The cie31 cmfs are convolved with the given illuminant X_illuminated = X_cie31 * illuminant.values Y_illuminated = Y_cie31 * illuminant.values Z_illuminated = Z_cie31 * illuminant.values # Generate empty output-array out_limits = np.zeros_like(standard_cfms.values) # This Array has 471 entries for whavelenghts from 360 nm to 830 nm opti_colour = np.zeros_like(Y_illuminated) # The array of optimal colours has the same dimensions like Y_illuminated # and all entries are initialy set to zero middle_opti_colour = 235 # is a constant and not be changed. 
At 595nm (360 + 235) # in the middle of the center_opti_colour-array # be aware that counting in array-positions starts at zero # The first optimum color has its center initialy at zero maximum_brightness = np.sum(Y_illuminated) # ""integral"" over Y_illuminated def optimum_colour(width, center): opti_colour = np.zeros(471) # creates array of 471 zeros and ones which represents optimum-colours # All values of the opti_colour-array are intialy set to zero half_width = width center_opti_colour = center middle_opti_colour = 235 opti_colour[middle_opti_colour - half_width:middle_opti_colour + half_width + 1] = 1 # we start the construction of the optimum color # at the center of the opti_colour-array opti_colour = np.roll(opti_colour, center_opti_colour - middle_opti_colour) # the optimum colour is rolled to the right whavelenght return opti_colour def bright_opti_colour(width, center, lightsource): brightness = np.sum( optimum_colour(width, center) * lightsource) / maximum_brightness return brightness step_size = np.array([64, 32, 16, 8, 4, 2, 1]) for whavelength in range(0, 471): width = 127 for n in step_size: brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness > target_bright or width > 234: width -= n else: width += n brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness < target_bright: width += 1 brightness = bright_opti_colour(width, whavelength, Y_illuminated) rough_optimum = optimum_colour(width, whavelength) brightness = np.sum(rough_optimum * Y_illuminated) / maximum_brightness # in the following, the both borders of the found rough_optimum # are reduced to get more exact results bright_difference = (brightness - target_bright) * maximum_brightness # discrimination for single-whavelenght-spectra if width > 0: opti_colour = np.zeros(471) opti_colour[middle_opti_colour - width:middle_opti_colour + width + 1] = 1 # instead rolling foreward opti_colour, light is rolled backward rolled_light = np.roll(Y_illuminated, middle_opti_colour - whavelength) opti_colour_light = opti_colour * rolled_light left_opti = opti_colour_light[middle_opti_colour - width] right_opti = opti_colour_light[middle_opti_colour + width] interpolation = 1 - (bright_difference / (left_opti + right_opti)) opti_colour[middle_opti_colour - width] = interpolation opti_colour[middle_opti_colour + width] = interpolation # opti_colour is rolled to right possition final_optimum = np.roll(opti_colour, whavelength - middle_opti_colour) else: final_optimum = rough_optimum / brightness * target_bright out_X = np.sum(final_optimum * X_illuminated) out_Y = target_bright * maximum_brightness out_Z = np.sum(final_optimum * Z_illuminated) triple = np.array([out_X, out_Y, out_Z]) out_limits[whavelength] = triple return (out_limits) ","def macadam_limits(target_brightness, illuminant=()): """""" whavelenght reaches from 360 to 830 nm, in within the programm it is handled as 0 to 470. Beyond the references this programm is very fast, because the possible optimums are not simply tested step by step but more effectively targeted by steps of power of two. The whavelenghts left and right of a rough optimum are fited by a rule of proportion, so that the wished brightness will be reached exactly. 
Parameters ---------- target_brightness : floating point brightness has to be between 0 and 1 illuminant: object illuminant must be out of colorimetry.MSDS_CMFS['XXX'] If there is no illuminant or it has the wrong form, the illuminant SDS_ILLUMINANTS['E'] is choosen wich has no influence to the calculations, because it is an equal-energy-spectrum if necessary a third parameter for the colour-matching funciton could easily be implemented Returns ------- an array of CIE -X,Y,Z - Triples for every single whavelength in single nm - Steps in the range from 360 to 830 nm References ---------- - cite: Wyszecki, G., & Stiles, W. S. (2000). In Color Science: Concepts and Methods, Quantitative Data and Formulae (pp. 181–184). Wiley. ISBN:978-0-471-39918-6 - cite: Francisco Martínez-Verdú, Esther Perales, Elisabet Chorro, Dolores de Fez, Valentín Viqueira, and Eduardo Gilabert, ""Computation and visualization of the MacAdam limits for any lightness, hue angle, and light source,"" J. Opt. Soc. Am. A 24, 1501-1515 (2007) - cite: Kenichiro Masaoka. In OPTICS LETTERS, June 15, 2010 / Vol. 35, No. 1 (pp. 2031 - 2033) Example -------- from matplotlib import pyplot as plt import numpy as np import math fig = plt.figure(figsize=(7,7)) ax = fig.add_axes([0,0,1,1]) illuminant = colour.SDS_ILLUMINANTS['D65'] def plot_Narrowband_Spectra (Yxy_Narrowband_Spectra): FirstColumn = 0 SecondColumn = 1 x = Yxy_Narrowband_Spectra[...,FirstColumn] y = Yxy_Narrowband_Spectra[...,SecondColumn] ax.plot(x,y,'orange',label='Spectrum Loci') x = [Yxy_Narrowband_Spectra[-1][FirstColumn], Yxy_Narrowband_Spectra[0][FirstColumn]] y = [Yxy_Narrowband_Spectra[-1][SecondColumn], Yxy_Narrowband_Spectra[0][SecondColumn]] ax.plot(x,y,'purple',label='Purple Boundary') return() for n in range(1, 20): Yxy_Narrowband_Spectra = colour.XYZ_to_xy( colour.macadam_limits(n/20, illuminant) / 100) plot_Narrowband_Spectra (Yxy_Narrowband_Spectra) plt.show() """""" target_bright = target_brightness if target_bright > 1 or target_bright < 0: raise TypeError('brightness of function macadam_limits( )' 'has to be between 0 and 1') standard_cfms = MSDS_CMFS['CIE 1931 2 Degree Standard Observer'] X_cie31 = standard_cfms.values[..., 0] Y_cie31 = standard_cfms.values[..., 1] Z_cie31 = standard_cfms.values[..., 2] try: illuminant.interpolator except AttributeError: illuminant = SDS_ILLUMINANTS['E'] # If there is no illuminant or it has the wrong form, # an illuminant choosen with no influence # If the illuminanats do not match the format of the Standard Observer, # they have to be adaptet illuminant.extrapolate(SpectralShape(360, 830)) illuminant.interpolate(SpectralShape(360, 830, 1)) # The cie31 cmfs are convolved with the given illuminant X_illuminated = X_cie31 * illuminant.values Y_illuminated = Y_cie31 * illuminant.values Z_illuminated = Z_cie31 * illuminant.values # Generate empty output-array out_limits = np.zeros_like(standard_cfms.values) # This Array has 471 entries for whavelenghts from 360 nm to 830 nm opti_colour = np.zeros_like(Y_illuminated) # The array of optimal colours has the same dimensions like Y_illuminated # and all entries are initialy set to zero middle_opti_colour = 235 # is a constant and not be changed. 
At 595nm (360 + 235) # in the middle of the center_opti_colour-array # be aware that counting in array-positions starts at zero # The first optimum color has its center initialy at zero maximum_brightness = np.sum(Y_illuminated) # ""integral"" over Y_illuminated def optimum_colour(width, center): opti_colour = np.zeros(471) # creates array of 471 zeros and ones which represents optimum-colours # All values of the opti_colour-array are intialy set to zero half_width = width center_opti_colour = center middle_opti_colour = 235 opti_colour[middle_opti_colour - half_width:middle_opti_colour + half_width + 1] = 1 # we start the construction of the optimum color # at the center of the opti_colour-array opti_colour = np.roll(opti_colour, center_opti_colour - middle_opti_colour) # the optimum colour is rolled to the right whavelenght return opti_colour def bright_opti_colour(width, center, lightsource): brightness = np.sum( optimum_colour(width, center) * lightsource) / maximum_brightness return brightness step_size = np.array([64, 32, 16, 8, 4, 2, 1]) for whavelength in range(0, 471): width = 127 for n in step_size: brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness > target_bright or width > 234: width -= n else: width += n brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness < target_bright: width += 1 brightness = bright_opti_colour(width, whavelength, Y_illuminated) rough_optimum = optimum_colour(width, whavelength) brightness = np.sum(rough_optimum * Y_illuminated) / maximum_brightness # in the following, the both borders of the found rough_optimum # are reduced to get more exact results bright_difference = (brightness - target_bright) * maximum_brightness # discrimination for single-whavelenght-spectra if width > 0: opti_colour = np.zeros(471) opti_colour[middle_opti_colour - width:middle_opti_colour + width + 1] = 1 # instead rolling foreward opti_colour, light is rolled backward rolled_light = np.roll(Y_illuminated, middle_opti_colour - wavelength) opti_colour_light = opti_colour * rolled_light left_opti = opti_colour_light[middle_opti_colour - width] right_opti = opti_colour_light[middle_opti_colour + width] interpolation = 1 - (bright_difference / (left_opti + right_opti)) opti_colour[middle_opti_colour - width] = interpolation opti_colour[middle_opti_colour + width] = interpolation # opti_colour is rolled to right possition final_optimum = np.roll(opti_colour, whavelength - middle_opti_colour) else: final_optimum = rough_optimum / brightness * target_bright out_X = np.sum(final_optimum * X_illuminated) out_Y = target_bright * maximum_brightness out_Z = np.sum(final_optimum * Z_illuminated) triple = np.array([out_X, out_Y, out_Z]) out_limits[whavelength] = triple return (out_limits) " 32589,"def getexternalservices_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" asm-getexternalservices command: Returns list of external services. Args: client (Client): CortexAttackSurfaceManagment client to use. args (dict): all command arguments, usually passed from ``demisto.args()``. ``args['ip_address']`` IP Address to search on. ``args['domain']`` Domain to search on. ``args['is_active']`` If the service active or not. ``args['discovery_type']`` how service was discovered. Returns: CommandResults: A ``CommandResults`` object that is then passed to ``return_results``, that contains external services. 
"""""" ip_address = args.get('ip_address') domain = args.get('domain') is_active = args.get('is_active') discovery_type = args.get('discovery_type') # create list of search parameters or pass empty list. search_params = [] if ip_address: search_params.append({""field"": ""ip_address"", ""operator"": ""eq"", ""value"": ip_address}) if domain: search_params.append({""field"": ""domain"", ""operator"": ""contains"", ""value"": domain}) if is_active: search_params.append({""field"": ""is_active"", ""operator"": ""in"", ""value"": [is_active]}) if discovery_type: search_params.append({""field"": ""discovery_type"", ""operator"": ""in"", ""value"": [discovery_type]}) response = client.getexternalservices_request(search_params) parsed = response['reply']['external_services'] markdown = tableToMarkdown('External Services', parsed, removeNull=True) command_results = CommandResults( outputs_prefix='ASM.GetExternalServices', outputs_key_field='service_id', outputs=parsed, raw_response=parsed, readable_output=markdown ) return command_results ","def getexternalservices_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" asm-getexternalservices command: Returns list of external services. Args: client (Client): CortexAttackSurfaceManagment client to use. args (dict): all command arguments, usually passed from ``demisto.args()``. ``args['ip_address']`` IP Address to search on. ``args['domain']`` Domain to search on. ``args['is_active']`` If the service active or not. ``args['discovery_type']`` how service was discovered. Returns: CommandResults: A ``CommandResults`` object that is then passed to ``return_results``, that contains external services. """""" ip_address = args.get('ip_address') domain = args.get('domain') is_active = args.get('is_active') discovery_type = args.get('discovery_type') # create list of search parameters or pass empty list. search_params = [] if ip_address: search_params.append({""field"": ""ip_address"", ""operator"": ""eq"", ""value"": ip_address}) if domain: search_params.append({""field"": ""domain"", ""operator"": ""contains"", ""value"": domain}) if is_active: search_params.append({""field"": ""is_active"", ""operator"": ""in"", ""value"": [is_active]}) if discovery_type: search_params.append({""field"": ""discovery_type"", ""operator"": ""in"", ""value"": [discovery_type]}) response = client.getexternalservices_request(search_params) parsed = response['reply']['external_services'] markdown = tableToMarkdown('External Services', parsed, removeNull=True) command_results = CommandResults( outputs_prefix='ASM.GetExternalServices', outputs_key_field='service_id', outputs=parsed, raw_response=response, readable_output=markdown ) return command_results " 27955,"def perform_analysis(args, skip_handler, context, actions, metadata): """""" Perform static analysis via the given (or if not, all) analyzers, in the given analysis context for the supplied build actions. Additionally, insert statistical information into the metadata dict. 
"""""" analyzers = args.analyzers if 'analyzers' in args \ else analyzer_types.supported_analyzers analyzers, _ = analyzer_types.check_supported_analyzers( analyzers, context) ctu_collect = False ctu_analyze = False ctu_dir = '' if 'ctu_phases' in args: ctu_dir = os.path.join(args.output_path, 'ctu-dir') args.ctu_dir = ctu_dir if ClangSA.ANALYZER_NAME not in analyzers: LOG.error(""CTU can only be used with the clang static analyzer."") return ctu_collect = args.ctu_phases[0] ctu_analyze = args.ctu_phases[1] if 'stats_enabled' in args and args.stats_enabled: if ClangSA.ANALYZER_NAME not in analyzers: LOG.debug(""Statistics can only be used with "" ""the Clang Static Analyzer."") return actions = prepare_actions(actions, analyzers) config_map = analyzer_types.build_config_handlers(args, context, analyzers) available_checkers = set() # Add profile names to the checkers list so we will not warn # if a profile is enabled but there is no checker with that name. available_checkers.update(context.available_profiles.keys()) # Collect all the available checkers from the enabled analyzers. for analyzer in config_map.items(): _, analyzer_cfg = analyzer for analyzer_checker in analyzer_cfg.checks().items(): checker_name, _ = analyzer_checker available_checkers.add(checker_name) if 'ordered_checkers' in args: missing_checkers = checkers.available(args.ordered_checkers, available_checkers) if missing_checkers: LOG.warning(""No checker(s) with these names was found:\n%s"", '\n'.join(missing_checkers)) LOG.warning(""Please review the checker names.\n"" ""In the next release the analysis will not start"" "" with invalid checker names."") if 'stats_enabled' in args: config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( SpecialReturnValueCollector.checker_analyze) config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( ReturnValueCollector.checker_analyze) # Statistics collector checkers must be explicitly disabled # as they trash the output. if ""clangsa"" in analyzers: config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( SpecialReturnValueCollector.checker_collect, False) config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( ReturnValueCollector.checker_collect, False) # Save some metadata information. versions = __get_analyzer_version(context, config_map) metadata['versions'].update(versions) metadata['checkers'] = {} for analyzer in analyzers: metadata['checkers'][analyzer] = {} for check, data in config_map[analyzer].checks().items(): enabled, _ = data metadata['checkers'][analyzer].update({check: enabled}) if ctu_collect: shutil.rmtree(ctu_dir, ignore_errors=True) elif ctu_analyze and not os.path.exists(ctu_dir): LOG.error(""CTU directory: '%s' does not exist."", ctu_dir) return start_time = time.time() # Use Manager to create data objects which can be # safely shared between processes. manager = SyncManager() manager.start(__mgr_init) config_map = manager.dict(config_map) actions_map = create_actions_map(actions, manager) # Setting to not None value will enable statistical analysis features. 
statistics_data = __get_statistics_data(args, manager) if ctu_collect or statistics_data: ctu_data = None if ctu_collect or ctu_analyze: ctu_capability = config_map[ClangSA.ANALYZER_NAME].ctu_capability ctu_data = manager.dict({'ctu_dir': ctu_dir, 'ctu_func_map_cmd': ctu_capability.mapping_tool_path, 'ctu_func_map_file': ctu_capability.mapping_file_name, 'ctu_temp_fnmap_folder': 'tmpExternalFnMaps'}) pre_analyze = [a for a in actions if a.analyzer_type == ClangSA.ANALYZER_NAME] pre_analysis_manager.run_pre_analysis(pre_analyze, context, config_map, args.jobs, skip_handler, ctu_data, statistics_data, manager) if 'stats_output' in args and args.stats_output: return if 'stats_dir' in args and args.stats_dir: statistics_data = manager.dict({'stats_out_dir': args.stats_dir}) ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \ args.ctu_reanalyze_on_failure if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect): LOG.info(""Starting static analysis ..."") analysis_manager.start_workers(actions_map, actions, context, config_map, args.jobs, args.output_path, skip_handler, metadata, 'quiet' in args, 'capture_analysis_output' in args, args.timeout if 'timeout' in args else None, ctu_reanalyze_on_failure, statistics_data, manager) LOG.info(""Analysis finished."") LOG.info(""To view results in the terminal use the "" ""\""CodeChecker parse\"" command."") LOG.info(""To store results use the \""CodeChecker store\"" command."") LOG.info(""See --help and the user guide for further options about"" "" parsing and storing the reports."") LOG.info(""----=================----"") end_time = time.time() LOG.info(""Analysis length: %s sec."", end_time - start_time) metadata['timestamps'] = {'begin': start_time, 'end': end_time} if ctu_collect and ctu_analyze: shutil.rmtree(ctu_dir, ignore_errors=True) manager.shutdown() ","def perform_analysis(args, skip_handler, context, actions, metadata): """""" Perform static analysis via the given (or if not, all) analyzers, in the given analysis context for the supplied build actions. Additionally, insert statistical information into the metadata dict. """""" analyzers = args.analyzers if 'analyzers' in args \ else analyzer_types.supported_analyzers analyzers, _ = analyzer_types.check_supported_analyzers( analyzers, context) ctu_collect = False ctu_analyze = False ctu_dir = '' if 'ctu_phases' in args: ctu_dir = os.path.join(args.output_path, 'ctu-dir') args.ctu_dir = ctu_dir if ClangSA.ANALYZER_NAME not in analyzers: LOG.error(""CTU can only be used with the clang static analyzer."") return ctu_collect = args.ctu_phases[0] ctu_analyze = args.ctu_phases[1] if 'stats_enabled' in args and args.stats_enabled: if ClangSA.ANALYZER_NAME not in analyzers: LOG.debug(""Statistics can only be used with "" ""the Clang Static Analyzer."") return actions = prepare_actions(actions, analyzers) config_map = analyzer_types.build_config_handlers(args, context, analyzers) available_checkers = set() # Add profile names to the checkers list so we will not warn # if a profile is enabled but there is no checker with that name. available_checkers.update(context.available_profiles.keys()) # Collect all the available checkers from the enabled analyzers. 
for analyzer in config_map.items(): _, analyzer_cfg = analyzer for analyzer_checker in analyzer_cfg.checks().items(): checker_name, _ = analyzer_checker available_checkers.add(checker_name) if 'ordered_checkers' in args: missing_checkers = checkers.available(args.ordered_checkers, available_checkers) if missing_checkers: LOG.warning(""No checker(s) with these names was found:\n%s"", '\n'.join(missing_checkers)) LOG.warning(""Please review the checker names.\n"" ""In the next release the analysis will not start "" "" with invalid checker names."") if 'stats_enabled' in args: config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( SpecialReturnValueCollector.checker_analyze) config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( ReturnValueCollector.checker_analyze) # Statistics collector checkers must be explicitly disabled # as they trash the output. if ""clangsa"" in analyzers: config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( SpecialReturnValueCollector.checker_collect, False) config_map[ClangSA.ANALYZER_NAME].set_checker_enabled( ReturnValueCollector.checker_collect, False) # Save some metadata information. versions = __get_analyzer_version(context, config_map) metadata['versions'].update(versions) metadata['checkers'] = {} for analyzer in analyzers: metadata['checkers'][analyzer] = {} for check, data in config_map[analyzer].checks().items(): enabled, _ = data metadata['checkers'][analyzer].update({check: enabled}) if ctu_collect: shutil.rmtree(ctu_dir, ignore_errors=True) elif ctu_analyze and not os.path.exists(ctu_dir): LOG.error(""CTU directory: '%s' does not exist."", ctu_dir) return start_time = time.time() # Use Manager to create data objects which can be # safely shared between processes. manager = SyncManager() manager.start(__mgr_init) config_map = manager.dict(config_map) actions_map = create_actions_map(actions, manager) # Setting to not None value will enable statistical analysis features. 
statistics_data = __get_statistics_data(args, manager) if ctu_collect or statistics_data: ctu_data = None if ctu_collect or ctu_analyze: ctu_capability = config_map[ClangSA.ANALYZER_NAME].ctu_capability ctu_data = manager.dict({'ctu_dir': ctu_dir, 'ctu_func_map_cmd': ctu_capability.mapping_tool_path, 'ctu_func_map_file': ctu_capability.mapping_file_name, 'ctu_temp_fnmap_folder': 'tmpExternalFnMaps'}) pre_analyze = [a for a in actions if a.analyzer_type == ClangSA.ANALYZER_NAME] pre_analysis_manager.run_pre_analysis(pre_analyze, context, config_map, args.jobs, skip_handler, ctu_data, statistics_data, manager) if 'stats_output' in args and args.stats_output: return if 'stats_dir' in args and args.stats_dir: statistics_data = manager.dict({'stats_out_dir': args.stats_dir}) ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \ args.ctu_reanalyze_on_failure if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect): LOG.info(""Starting static analysis ..."") analysis_manager.start_workers(actions_map, actions, context, config_map, args.jobs, args.output_path, skip_handler, metadata, 'quiet' in args, 'capture_analysis_output' in args, args.timeout if 'timeout' in args else None, ctu_reanalyze_on_failure, statistics_data, manager) LOG.info(""Analysis finished."") LOG.info(""To view results in the terminal use the "" ""\""CodeChecker parse\"" command."") LOG.info(""To store results use the \""CodeChecker store\"" command."") LOG.info(""See --help and the user guide for further options about"" "" parsing and storing the reports."") LOG.info(""----=================----"") end_time = time.time() LOG.info(""Analysis length: %s sec."", end_time - start_time) metadata['timestamps'] = {'begin': start_time, 'end': end_time} if ctu_collect and ctu_analyze: shutil.rmtree(ctu_dir, ignore_errors=True) manager.shutdown() " 39650,"def _get_output_dask_ar_meta_for_estimator(model_fn, estimator, input_dask_ar): """""" Returns the output metadata array for the model function (predict, transform etc) by running the appropriate function on dummy data of shape (1, n_features) Parameters ---------- model_fun: Model function _predict, _transform etc estimator : Estimator The underlying estimator that is fit. input_dask_ar: The input dask_array Returns ------- metadata: metadata of output dask array """""" # sklearn fails if input array has size size # It requires at least 1 sample to run successfully imput_meta = input_dask_ar._meta if hasattr(imput_meta, ""__array_function__""): ar = np.zeros( shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype, like=imput_meta, ) elif ""scipy.sparse"" in type(imput_meta).__module__: # sparse matrices dont support # `like` due to non implimented __array_function__ # Refer https:/q/github.com/scipy/scipy/issues/10362 # Note below works for both cupy and scipy sparse matrices ar = type(imput_meta)((1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype) else: msg = ( ""\nYou did not provide metadata, so Dask is running the"" ""function on a small dataset to guess output types. 
"" ""It is possible that Dask will guess incorrectly.\n"" ""To provide an explicit output types or to silence this message, "" ""please provide the `predict_meta`, `predict_proba_meta`,"" ""`transform_meta` as appropiate"" ) warnings.warn(msg) ar = np.zeros(shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype) return model_fn(ar, estimator) ","def _get_output_dask_ar_meta_for_estimator(model_fn, estimator, input_dask_ar): """""" Returns the output metadata array for the model function (predict, transform etc) by running the appropriate function on dummy data of shape (1, n_features) Parameters ---------- model_fun: Model function _predict, _transform etc estimator : Estimator The underlying estimator that is fit. input_dask_ar: The input dask_array Returns ------- metadata: metadata of output dask array """""" # sklearn fails if input array has size size # It requires at least 1 sample to run successfully imput_meta = input_dask_ar._meta if hasattr(imput_meta, ""__array_function__""): ar = np.zeros( shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype, like=imput_meta, ) elif ""scipy.sparse"" in type(imput_meta).__module__: # sparse matrices dont support # `like` due to non implimented __array_function__ # Refer https://github.com/scipy/scipy/issues/10362 # Note below works for both cupy and scipy sparse matrices ar = type(imput_meta)((1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype) else: msg = ( ""\nYou did not provide metadata, so Dask is running the"" ""function on a small dataset to guess output types. "" ""It is possible that Dask will guess incorrectly.\n"" ""To provide an explicit output types or to silence this message, "" ""please provide the `predict_meta`, `predict_proba_meta`,"" ""`transform_meta` as appropiate"" ) warnings.warn(msg) ar = np.zeros(shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype) return model_fn(ar, estimator) " 5642,"def convmtx(a, n, mode='full'): """""" Construct a convolution matrix. Constructs a matrix `A = convmtx(a, n[, mode])` such that `np.dot(A, v)` is equivalent to `convolve(a, v[, mode])` but slower. Parameters ---------- a : The array to convolve. n : The number of columns in the resulting matrix. This is analogous to the length of v in numpy.convolve(v, a) mode : This is analogous to `mode` in numpy.convolve(v, a, mode). It determines the number of rows in A is 'full'[default]: `len(a) + n - 1` 'same': `max(n, len(a) )` 'valid': `max(n, len(a) ) - min(n, len(a) ) + 1` Returns ------- A : The convolution matrix Notes ----- .. versionadded:: 1.4.0 See Also -------- toeplitz : Toeplitz matrix Examples -------- >>> from scipy.linalg import convmtx >>> convmtx( (-1,2,-1), 5, mode='same') array([[ 2, -1, 0, 0, 0], [-1, 2, -1, 0, 0], [ 0, -1, 2, -1, 0], [ 0, 0, -1, 2, -1], [ 0, 0, 0, -1, 2]]) """""" a = np.asarray(a) if a.ndim != 1: raise ValueError('convmtx expects a 1d array as input') row0 = np.concatenate(((a[0],), np.zeros(n-1, a.dtype))) col0 = np.concatenate((a, np.zeros(n-1, a.dtype))) A = toeplitz(col0, row0) # The efficiency of this construction could be improved for # 'valid','same' modes ,especially when n << len(a). if mode == 'valid': tb = min(n, len(a)) - 1 te = tb elif mode == 'same': trim = min(n, len(a)) - 1 tb = trim//2 te = trim-tb else: return A return A[tb:-te] ","def convmtx(a, n, mode='full'): """""" Construct a convolution matrix. Constructs a matrix `A = convmtx(a, n[, mode])` such that `np.dot(A, v)` is equivalent to `convolve(a, v[, mode])` but slower. 
Parameters ---------- a : The array to convolve. n : The number of columns in the resulting matrix. This is analogous to the length of v in numpy.convolve(v, a) mode : This is analogous to `mode` in numpy.convolve(v, a, mode). It determines the number of rows in A as 'full'[default]: `len(a) + n - 1` 'same': `max(n, len(a) )` 'valid': `max(n, len(a) ) - min(n, len(a) ) + 1` Returns ------- A : The convolution matrix Notes ----- .. versionadded:: 1.4.0 See Also -------- toeplitz : Toeplitz matrix Examples -------- >>> from scipy.linalg import convmtx >>> convmtx( (-1,2,-1), 5, mode='same') array([[ 2, -1, 0, 0, 0], [-1, 2, -1, 0, 0], [ 0, -1, 2, -1, 0], [ 0, 0, -1, 2, -1], [ 0, 0, 0, -1, 2]]) """""" a = np.asarray(a) if a.ndim != 1: raise ValueError('convmtx expects a 1d array as input') row0 = np.concatenate(((a[0],), np.zeros(n-1, a.dtype))) col0 = np.concatenate((a, np.zeros(n-1, a.dtype))) A = toeplitz(col0, row0) # The efficiency of this construction could be improved for # 'valid','same' modes ,especially when n << len(a). if mode == 'valid': tb = min(n, len(a)) - 1 te = tb elif mode == 'same': trim = min(n, len(a)) - 1 tb = trim//2 te = trim-tb else: return A return A[tb:-te] " 36399,"def runtest(ns, test_name): """"""Run a single test. ns -- regrtest namespace of options test_name -- the name of the test Returns the tuple (result, test_time, xml_data), where result is one of the constants: INTERRUPTED KeyboardInterrupt RESOURCE_DENIED test skipped because resource denied SKIPPED test skipped for some other reason ENV_CHANGED test failed because it changed the execution environment FAILED test failed PASSED test passed EMPTY_TEST_SUITE test ran no subtests. TIMEOUT test timd out. If ns.xmlpath is not None, xml_data is a list containing each generated testsuite element. """""" try: return _runtest(ns, test_name) except: if not ns.pgo: msg = traceback.format_exc() print(f""test {test_name} crashed -- {msg}"", file=sys.stderr, flush=True) return TestResult(test_name, FAILED, 0.0, None) ","def runtest(ns, test_name): """"""Run a single test. ns -- regrtest namespace of options test_name -- the name of the test Returns the tuple (result, test_time, xml_data), where result is one of the constants: INTERRUPTED KeyboardInterrupt RESOURCE_DENIED test skipped because resource denied SKIPPED test skipped for some other reason ENV_CHANGED test failed because it changed the execution environment FAILED test failed PASSED test passed EMPTY_TEST_SUITE test ran no subtests. TIMEOUT test timed out. If ns.xmlpath is not None, xml_data is a list containing each generated testsuite element. 
"""""" try: return _runtest(ns, test_name) except: if not ns.pgo: msg = traceback.format_exc() print(f""test {test_name} crashed -- {msg}"", file=sys.stderr, flush=True) return TestResult(test_name, FAILED, 0.0, None) " 4118,"def p_c_arg_decl(s, ctx, in_pyfunc, cmethod_flag = 0, nonempty = 0, kw_only = 0, annotated = 1): pos = s.position() not_none = or_none = 0 default = None annotation = None if s.in_python_file: # empty type declaration base_type = Nodes.CSimpleBaseTypeNode(pos, name = None, module_path = [], is_basic_c_type = 0, signed = 0, complex = 0, longness = 0, is_self_arg = cmethod_flag, templates = None) else: base_type = p_c_base_type(s, nonempty = nonempty) declarator = p_c_declarator(s, ctx, nonempty = nonempty) if s.sy in ('not', 'or') and not s.in_python_file: kind = s.sy s.next() if s.sy == 'IDENT' and s.systring == 'None': s.next() else: s.error(""Expected 'None'"") if not in_pyfunc: error(pos, ""'%s None' only allowed in Python functions"" % kind) or_none = kind == 'or' not_none = kind == 'not' if annotated and s.sy == ':': s.next() annotation = p_annotation(s) if s.sy == '=': s.next() if 'pxd' in ctx.level: if s.sy in ['*', '?']: # TODO(github/1736): Make this an error for inline declarations. default = ExprNodes.NoneNode(pos) s.next() elif 'inline' in ctx.modifiers: default = p_test(s) else: error(pos, ""default values cannot be specified in pxd files, use ? or *"") else: default = p_test(s) return Nodes.CArgDeclNode(pos, base_type = base_type, declarator = declarator, not_none = not_none, or_none = or_none, default = default, annotation = annotation, kw_only = kw_only) ","def p_c_arg_decl(s, ctx, in_pyfunc, cmethod_flag = 0, nonempty = 0, kw_only = 0, annotated = 1): pos = s.position() not_none = or_none = 0 default = None annotation = None if s.in_python_file: # empty type declaration base_type = Nodes.CSimpleBaseTypeNode(pos, name = None, module_path = [], is_basic_c_type = 0, signed = 0, complex = 0, longness = 0, is_self_arg = cmethod_flag, templates = None) else: base_type = p_c_base_type(s, nonempty=nonempty) declarator = p_c_declarator(s, ctx, nonempty = nonempty) if s.sy in ('not', 'or') and not s.in_python_file: kind = s.sy s.next() if s.sy == 'IDENT' and s.systring == 'None': s.next() else: s.error(""Expected 'None'"") if not in_pyfunc: error(pos, ""'%s None' only allowed in Python functions"" % kind) or_none = kind == 'or' not_none = kind == 'not' if annotated and s.sy == ':': s.next() annotation = p_annotation(s) if s.sy == '=': s.next() if 'pxd' in ctx.level: if s.sy in ['*', '?']: # TODO(github/1736): Make this an error for inline declarations. default = ExprNodes.NoneNode(pos) s.next() elif 'inline' in ctx.modifiers: default = p_test(s) else: error(pos, ""default values cannot be specified in pxd files, use ? 
or *"") else: default = p_test(s) return Nodes.CArgDeclNode(pos, base_type = base_type, declarator = declarator, not_none = not_none, or_none = or_none, default = default, annotation = annotation, kw_only = kw_only) " 40414,"def run(args: argparse.ArgumentParser) -> None: print(""BENCHMARK STARTS"") for dataset_name in args.datasets: print(""Dataset: "", dataset_name) if args.datasets_root == 'data': root = osp.join(osp.dirname(osp.realpath(__file__)), '../..', 'data', dataset_name.partition(""-"")[2]) else: root = args.datasets_root if dataset_name == 'ogbn-mag': transform = T.ToUndirected(merge=True) dataset = OGB_MAG(root=root, transform=transform) train_idx = ('paper', dataset[0]['paper'].train_mask) valid_idx = ('paper', dataset[0]['paper'].val_mask) neighbour_sizes = args.hetero_neighbour_sizes else: dataset = PygNodePropPredDataset(dataset_name, root) split_idx = dataset.get_idx_split() train_idx = split_idx['train'] valid_idx = split_idx['valid'] neighbour_sizes = args.homo_neighbour_sizes data = dataset[0].to(args.device) print('Train sampling') for sizes in neighbour_sizes: print(f'Sizes={sizes}') for batch_size in args.batch_sizes: train_loader = NeighborLoader(data, num_neighbors=sizes, input_nodes=train_idx, batch_size=batch_size, shuffle=True, num_workers=args.num_workers) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in train_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') print('Validation sampling') for batch_size in args.eval_batch_sizes: val_loader = NeighborLoader(data, num_neighbors=[-1], input_nodes=valid_idx, batch_size=batch_size, shuffle=False, num_workers=args.num_workers) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in val_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') ","def run(args: argparse.ArgumentParser) -> None: print(""BENCHMARK STARTS"") for dataset_name in args.datasets: print(""Dataset: "", dataset_name) if args.datasets_root == 'data': root = osp.join(osp.dirname(osp.realpath(__file__)), '../..', 'data', dataset_name.partition(""-"")[2]) else: root = args.datasets_root if dataset_name == 'ogbn-mag': transform = T.ToUndirected(merge=True) dataset = OGB_MAG(root=root, transform=transform) train_idx = ('paper', dataset[0]['paper'].train_mask) valid_idx = ('paper', dataset[0]['paper'].val_mask) neighbour_sizes = args.hetero_neighbour_sizes else: dataset = PygNodePropPredDataset(dataset_name, root) split_idx = dataset.get_idx_split() train_idx = split_idx['train'] valid_idx = split_idx['valid'] neighbour_sizes = args.homo_neighbour_sizes data = dataset[0].to(args.device) print('Train sampling') for sizes in neighbour_sizes: print(f'Sizes={sizes}') for batch_size in args.batch_sizes: train_loader = NeighborLoader(data, num_neighbors=sizes, input_nodes=train_idx, batch_size=batch_size, shuffle=True, num_workers=args.num_workers, ) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in train_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) 
print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') print('Validation sampling') for batch_size in args.eval_batch_sizes: val_loader = NeighborLoader(data, num_neighbors=[-1], input_nodes=valid_idx, batch_size=batch_size, shuffle=False, num_workers=args.num_workers) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in val_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') " 32406,"def get_attachments_using_instance(email_related_incident, labels): """"""Use the instance from which the email was received to fetch the attachments. Only supported with: EWS V2, Gmail Args: email_related_incident (str): ID of the incident to attach the files to. labels (Dict): Incident's labels to fetch the relevant data from. """""" message_id = '' instance_name = '' integration_name = '' for label in labels: if label.get('type') == 'Email/ID': message_id = label.get('value') elif label.get('type') == 'Instance': instance_name = label.get('value') elif label.get('type') == 'Brand': integration_name = label.get('value') if integration_name == 'EWS v2': demisto.executeCommand(""executeCommandAt"", {'command': 'ews-get-attachment', 'incidents': email_related_incident, 'arguments': {'item-id': str(message_id), 'using': instance_name}}) elif integration_name == 'EWSO365': demisto.executeCommand(""executeCommandAt"", {'command': 'ews-get-attachment', 'incidents': email_related_incident, 'arguments': {'item-id': str(message_id), 'using': instance_name}}) elif integration_name == 'Gmail' or integration_name == 'Gmail Single User': demisto.executeCommand(""executeCommandAt"", {'command': 'gmail-get-attachments', 'incidents': email_related_incident, 'arguments': {'user-id': 'me', 'message-id': str(message_id), 'using': instance_name}}) # Note: attachments are downloaded by default when emails are fetched using the graph integrations, # so this method isn't needed for them. else: demisto.debug('Attachments could only be retrieved from EWS v2 or Gmail') ","def get_attachments_using_instance(email_related_incident, labels): """"""Use the instance from which the email was received to fetch the attachments. Only supported with: EWS V2, Gmail Args: email_related_incident (str): ID of the incident to attach the files to. labels (Dict): Incident's labels to fetch the relevant data from. 
"""""" message_id = '' instance_name = '' integration_name = '' for label in labels: if label.get('type') == 'Email/ID': message_id = label.get('value') elif label.get('type') == 'Instance': instance_name = label.get('value') elif label.get('type') == 'Brand': integration_name = label.get('value') if integration_name == 'EWS v2': demisto.executeCommand(""executeCommandAt"", {'command': 'ews-get-attachment', 'incidents': email_related_incident, 'arguments': {'item-id': str(message_id), 'using': instance_name}}) elif integration_name == 'EWSO365': demisto.executeCommand(""executeCommandAt"", {'command': 'ews-get-attachment', 'incidents': email_related_incident, 'arguments': {'item-id': str(message_id), 'using': instance_name}}) elif integration_name in ['Gmail', 'Gmail Single User']: demisto.executeCommand(""executeCommandAt"", {'command': 'gmail-get-attachments', 'incidents': email_related_incident, 'arguments': {'user-id': 'me', 'message-id': str(message_id), 'using': instance_name}}) # Note: attachments are downloaded by default when emails are fetched using the graph integrations, # so this method isn't needed for them. else: demisto.debug('Attachments could only be retrieved from EWS v2 or Gmail') " 51456,"def guess_engine(store_spec): engines = list_engines() for engine, backend in engines.items(): try: if backend.guess_can_open(store_spec): return engine except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) compatible = [] for engine, backend_cls in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if backend.guess_can_open(store_spec): compatible.append(engine) except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) installed = [k for k in engines if k != ""store""] if not compatible: if installed: error_msg = ( ""did not find a match in any of xarray's currently installed IO "" f""backends {installed}. Consider explicitly selecting one of the "" ""installed engines via the ``engine`` parameter, or installing "" ""additional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html"" ) else: error_msg = ( ""xarray is unable to open this file because it has no currently "" ""installed IO backends. Xarray's read/write support requires "" ""installing optional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io"" ) else: error_msg = ( ""found the following matches with the input file in xarray's IO "" f""backends: {compatible}. 
But their dependencies may not be installed, see:\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html \n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html"" ) raise ValueError(error_msg) ","def guess_engine(store_spec): engines = list_engines() for engine, backend in engines.items(): try: if backend.guess_can_open(store_spec): return engine except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) compatible = [] for engine, backend_cls in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if backend.guess_can_open(store_spec): compatible.append(engine) except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) installed = [k for k in engines if k != ""store""] if not compatible: if installed: error_msg = ( ""did not find a match in any of xarray's currently installed IO "" f""backends {installed}. Consider explicitly selecting one of the "" ""installed engines via the ``engine`` parameter, or installing "" ""additional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html"" ) else: error_msg = ( ""xarray is unable to open this file because no IO backend "" ""is currently installed. Xarray's read/write support requires "" ""installing optional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io"" ) else: error_msg = ( ""found the following matches with the input file in xarray's IO "" f""backends: {compatible}. But their dependencies may not be installed, see:\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html \n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html"" ) raise ValueError(error_msg) " 39555,"def resolve_version_range( affected_version_range: VersionRange, package_versions: List[str], ignorable_versions=WEIRD_IGNORABLE_VERSIONS, ) -> Tuple[List[str], List[str]]: """""" Given an affected version range and a list of `package_versions`, resolve which versions are in this range and return a tuple of two lists of `affected_versions` and `unaffected_versions`. """""" if not affected_version_range: LOGGER.error(f""affected version range is {affected_version_range!r}"") return [], [] affected_versions = [] unaffected_versions = [] for package_version in package_versions or []: if package_version in ignorable_versions: continue # Remove whitespace package_version = package_version.replace("" "", """") # Remove leading 'v' package_version = package_version.lstrip(""v"").lstrip(""V"") try: version = affected_version_range.version_class(package_version) except Exception: LOGGER.error(f""Could not parse version {package_version!r}"") continue if version in affected_version_range: affected_versions.append(package_version) else: unaffected_versions.append(package_version) return affected_versions, unaffected_versions ","def resolve_version_range( affected_version_range: VersionRange, package_versions: List[str], ignorable_versions=WEIRD_IGNORABLE_VERSIONS, ) -> Tuple[List[str], List[str]]: """""" Given an affected version range and a list of `package_versions`, resolve which versions are in this range and return a tuple of two lists of `affected_versions` and `unaffected_versions`. 
"""""" if not affected_version_range: LOGGER.error(f""affected version range is {affected_version_range!r}"") return [], [] affected_versions = [] unaffected_versions = [] for package_version in package_versions or []: if package_version in ignorable_versions: continue # Remove whitespace package_version = package_version.replace("" "", """") # Remove leading 'v' package_version = package_version.lstrip(""vV"") try: version = affected_version_range.version_class(package_version) except Exception: LOGGER.error(f""Could not parse version {package_version!r}"") continue if version in affected_version_range: affected_versions.append(package_version) else: unaffected_versions.append(package_version) return affected_versions, unaffected_versions " 38365,"def set_log_level(level: Union[int, str]) -> None: """""" Select which minimal logging level should be displayed. Parameters ---------- level: int or str Possible values by increasing level: 0 or ""notset"" 1 or ""all"" 10 or ""debug"" 20 or ""info"" 30 or ""warning"" 40 or ""error"" 50 or ""critical"" """""" # this is a user-facing interface to avoid importing from yt.utilities in user code. if isinstance(level, int): ytcfg[""logging"", ""level""] = loglevel_int2str(level) elif isinstance(level, str): ytcfg[""logging"", ""level""] = level.upper() level = loglevel_str2int(level) else: raise TypeError( f""Expected an int or an str, got `{level}` with type `{type(level)}`."" ) ytLogger.setLevel(level) ytLogger.debug(""log level to %s"", level) ","def set_log_level(level: Union[int, str]) -> None: """""" Select which minimal logging level should be displayed. Parameters ---------- level: int or str Possible values by increasing level: 0 or ""notset"" 1 or ""all"" 10 or ""debug"" 20 or ""info"" 30 or ""warning"" 40 or ""error"" 50 or ""critical"" """""" # this is a user-facing interface to avoid importing from yt.utilities in user code. if isinstance(level, int): ytcfg[""logging"", ""level""] = loglevel_int2str(level) elif isinstance(level, str): ytcfg[""logging"", ""level""] = level.upper() level = loglevel_str2int(level) else: raise TypeError( f""Expected an int or an str, got `{level}` with type `{type(level)}`."" ) ytLogger.setLevel(level) ytLogger.debug(""log level set to %s"", level) " 59299,"def _ks_assemble(asm): global ks from keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN if ks is None: ks = Ks(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN) ords = ks.asm(asm)[0] if not ords: raise Exception(f""bad assembly: {asm}"") return binascii.hexlify(bytearray(ords)) ","def _ks_assemble(asm: str) -> bytes: """"""Assemble the given string using Keystone."""""" # Explicitly uses late importing so that Keystone will only be imported if this is called. # This lets us avoid requiring installation of Keystone for running tests. 
global ks from keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN if ks is None: ks = Ks(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN) ords = ks.asm(asm)[0] if not ords: raise Exception(f""bad assembly: {asm}"") return binascii.hexlify(bytearray(ords)) " 27461,"def on_subscribe(unused_client, unused_userdata, mid, granted_qos): print('on_subscribe: mid {}, qos {granted_qos}'.format(mid)) try: client_addr, response = gateway_state.pending_subscribes[mid] udpSerSock.sendto(response.encode(), client_addr) except KeyError: print('Unable to find key {}'.format(mid)) ","def on_subscribe(unused_client, unused_userdata, mid, granted_qos): print('on_subscribe: mid {}, qos {granted_qos}'.format(mid)) try: client_addr, response = gateway_state.pending_subscribes[mid] udpSerSock.sendto(response.encode(), client_addr) except KeyError: print('Unable to find mid: {}'.format(mid)) " 45221,"def test_modin_concat(): df, df2 = generate_dfs() modin_df, modin_df2 = from_pandas(df), from_pandas(df2) df_equals(pd.concat([modin_df, modin_df2]), pandas.concat([df, df2])) ","def test_concat(): df, df2 = generate_dfs() modin_df, modin_df2 = from_pandas(df), from_pandas(df2) df_equals(pd.concat([modin_df, modin_df2]), pandas.concat([df, df2])) " 1722,"def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. 
https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression().fit(X,y) LogisticRegression(...) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) {'importances_mean': array([0.5, 0. , 0. ]), 'importances_std': array([0.16666667, 0. , 0. ]), 'importances': array([[0.33333333, 0.66666667], [0. , 0. ], [0. , 0. ]])} >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) ","def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. 
Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1, 1, 1, 0, 0, 0] >>> clf = LogisticRegression().fit(X,y) LogisticRegression(...) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) {'importances_mean': array([0.5, 0. , 0. ]), 'importances_std': array([0.16666667, 0. , 0. ]), 'importances': array([[0.33333333, 0.66666667], [0. , 0. ], [0. , 0. ]])} >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) " 25810,"def get_locale_paths(opts={}): """""" Given the value for a plugin_file or the comma-separated path for any module(s) which we need to interact with, this will return the locale_paths values as returned by ..webpack_json.initialize_plugins Alternatively, we can use given ""locale paths"" to get what we need. Note that per the crowdin.cli documentation for the plugin* args, plugins_path will be attempted first if both options are given. Returns a set() of paths where locale files can be found """""" plugins_path = opts.get(""plugins_path"") plugins_list = opts.get(""plugins_list"", """") locale_paths = opts.get(""locale_paths"", """") if plugins_path: paths = _get_locale_paths_from_file(plugins_path) elif plugins_list: paths = _get_locale_paths_from_list(plugins_list) else: paths = _get_locale_paths_from_paths(locale_paths) locale_data_folders = [path[""locale_data_folder""] for path in paths] return set(locale_data_folders) ","def get_locale_paths(opts={}): """""" Given the value for a plugin_file or the comma-separated path for any module(s) which we need to interact with, this will return the locale_paths values as returned by ..webpack_json.initialize_plugins Alternatively, we can use given ""locale paths"" to get what we need. Note that per the crowdin.cli documentation for the plugin* args, plugins_path will be attempted first if both options are given. 
Returns a set() of paths where locale files can be found """""" plugins_path = opts.get(""plugins_path"") plugins_list = opts.get(""plugins_list"", """") locale_paths = opts.get(""locale_paths"", """") if plugins_path: paths = _get_locale_paths_from_file(plugins_path) elif plugins_list: paths = _get_locale_paths_from_list(plugins_list) else: paths = _get_locale_paths_from_paths(locale_paths) locale_data_folders = {path[""locale_data_folder""] for path in paths} return set(locale_data_folders) " 1104,"def rebase_path_traits(thistrait, value, cwd): """"""Rebase a BasePath-derived trait given an interface spec."""""" if thistrait.is_trait_type(BasePath): value = _rebase_path(value, cwd) elif thistrait.is_trait_type(traits.List): innertrait, = thistrait.inner_traits if not isinstance(value, (list, tuple)): value = rebase_path_traits(innertrait, value, cwd) else: value = [rebase_path_traits(innertrait, v, cwd) for v in value] elif thistrait.is_trait_type(traits.Dict): _, innertrait = thistrait.inner_traits value = {k: rebase_path_traits(innertrait, v, cwd) for k, v in value.items()} elif thistrait.is_trait_type(Tuple): value = tuple([rebase_path_traits(subtrait, v, cwd) for subtrait, v in zip(thistrait.inner_traits, value)]) elif thistrait.alternatives: is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) for f in thistrait.alternatives] if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): return value for subtrait in thistrait.alternatives: value = rebase_path_traits(subtrait, value, cwd) return value ","def rebase_path_traits(thistrait, value, cwd): """"""Rebase a BasePath-derived trait given an interface spec."""""" if thistrait.is_trait_type(BasePath): value = _rebase_path(value, cwd) elif thistrait.is_trait_type(traits.List): innertrait, = thistrait.inner_traits() if not isinstance(value, (list, tuple)): value = rebase_path_traits(innertrait, value, cwd) else: value = [rebase_path_traits(innertrait, v, cwd) for v in value] elif thistrait.is_trait_type(traits.Dict): _, innertrait = thistrait.inner_traits value = {k: rebase_path_traits(innertrait, v, cwd) for k, v in value.items()} elif thistrait.is_trait_type(Tuple): value = tuple([rebase_path_traits(subtrait, v, cwd) for subtrait, v in zip(thistrait.inner_traits, value)]) elif thistrait.alternatives: is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) for f in thistrait.alternatives] if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): return value for subtrait in thistrait.alternatives: value = rebase_path_traits(subtrait, value, cwd) return value " 37955,"def fmt_docstring(module_func): r"""""" Decorator to insert common text into module docstrings. Should be the last decorator (at the top). Use any of these placeholders in your docstring to have them substituted: * ``{aliases}``: Insert a section listing the parameter aliases defined by decorator ``use_alias``. The following are places for common parameter descriptions: * ``{R}``: region (bounding box as west, east, south, north) * ``{J}``: projection (coordinate system to use) * ``{B}``: frame (map frame and axes parameters) * ``{U}``: timestamp (insert time stamp logo) * ``{CPT}``: cmap (the color palette table) * ``{G}``: color * ``{W}``: pen * ``{n}``: interpolation Parameters ---------- module_func : function The module function. Returns ------- module_func The same *module_func* but with the docstring formatted. Examples -------- >>> @fmt_docstring ... 
@use_alias(R=""region"", J=""projection"") ... def gmtinfo(**kwargs): ... ''' ... My nice module. ... ... Parameters ... ---------- ... {R} ... {J} ... ... {aliases} ... ''' ... pass >>> print(gmtinfo.__doc__) My nice module. Parameters ---------- region : str or list *Required if this is the first plot command*. *xmin/xmax/ymin/ymax*\ [**+r**][**+u**\ *unit*]. Specify the region of interest. Select map :doc:`region `. projection : str *Required if this is the first plot command*. *projcode*\[*projparams*/]\ *width*. Select map :doc:`projection `. frame : bool or str or list Set map boundary frame and axes attributes. Select map :doc:`frame `. **Aliases:** - J = projection - R = region """""" filler_text = {} if hasattr(module_func, ""aliases""): aliases = [""**Aliases:**\n""] for arg in sorted(module_func.aliases): alias = module_func.aliases[arg] aliases.append(""- {} = {}"".format(arg, alias)) filler_text[""aliases""] = ""\n"".join(aliases) for marker, text in COMMON_OPTIONS.items(): # Remove the indentation and the first line break from the multiline # strings so that it doesn't mess up the original docstring filler_text[marker] = textwrap.dedent(text.lstrip(""\n"")) # Dedent the docstring to make it all match the option text. docstring = textwrap.dedent(module_func.__doc__) module_func.__doc__ = docstring.format(**filler_text) return module_func ","def fmt_docstring(module_func): r"""""" Decorator to insert common text into module docstrings. Should be the last decorator (at the top). Use any of these placeholders in your docstring to have them substituted: * ``{aliases}``: Insert a section listing the parameter aliases defined by decorator ``use_alias``. The following are places for common parameter descriptions: * ``{R}``: region (bounding box as west, east, south, north) * ``{J}``: projection (coordinate system to use) * ``{B}``: frame (map frame and axes parameters) * ``{U}``: timestamp (insert time stamp logo) * ``{CPT}``: cmap (the color palette table) * ``{G}``: color * ``{W}``: pen * ``{n}``: interpolation Parameters ---------- module_func : function The module function. Returns ------- module_func The same *module_func* but with the docstring formatted. Examples -------- >>> @fmt_docstring ... @use_alias(R=""region"", J=""projection"") ... def gmtinfo(**kwargs): ... ''' ... My nice module. ... ... Parameters ... ---------- ... {R} ... {J} ... ... {aliases} ... ''' ... pass >>> print(gmtinfo.__doc__) My nice module. Parameters ---------- region : str or list *Required if this is the first plot command*. *xmin/xmax/ymin/ymax*\ [**+r**][**+u**\ *unit*]. Specify the region of interest. Select map :doc:`region `. projection : str *Required if this is the first plot command*. *projcode*\[*projparams*/]\ *width*. Select map :doc:`projection `. frame : bool or str or list Set map boundary :doc:`frame and axes attributes `. **Aliases:** - J = projection - R = region """""" filler_text = {} if hasattr(module_func, ""aliases""): aliases = [""**Aliases:**\n""] for arg in sorted(module_func.aliases): alias = module_func.aliases[arg] aliases.append(""- {} = {}"".format(arg, alias)) filler_text[""aliases""] = ""\n"".join(aliases) for marker, text in COMMON_OPTIONS.items(): # Remove the indentation and the first line break from the multiline # strings so that it doesn't mess up the original docstring filler_text[marker] = textwrap.dedent(text.lstrip(""\n"")) # Dedent the docstring to make it all match the option text. 
docstring = textwrap.dedent(module_func.__doc__) module_func.__doc__ = docstring.format(**filler_text) return module_func " 53508,"def foo(x, y): # [unused-argument] return {x: None} ","def print_point(x, y): # [unused-argument] print(f""Point is located at {x},{x}"") " 31566,"def get_users_preference_command(client): result = client.get_users_preference() if not result.get('success'): raise DemistoException(result['message']) del result['success'] table_header = list(result.keys()) display_title = ""User's Preference"" markdown = tableToMarkdown(display_title, result, headers=table_header) return CommandResults( readable_output=markdown, outputs_prefix='LogPoint.User.Preference', outputs=result ) ","def get_users_preference_command(client): result = client.get_users_preference() if not result.get('success'): raise DemistoException(result.get('message')) del result['success'] table_header = list(result.keys()) display_title = ""User's Preference"" markdown = tableToMarkdown(display_title, result, headers=table_header) return CommandResults( readable_output=markdown, outputs_prefix='LogPoint.User.Preference', outputs=result ) " 31017,"def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str: """""" This command pushes local changes to the remote system. Args: client: XSOAR Client to use. args: args['data']: the data to send to the remote system args['entries']: the entries to send to the remote system args['incident_changed']: boolean telling us if the local incident indeed changed or not args['remote_incident_id']: the remote incident id params: entry_tags: the tags to pass to the entries (to separate between comments and work_notes) Returns: The remote incident id - ticket_id """""" parsed_args = UpdateRemoteSystemArgs(args) if parsed_args.delta: demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}') ticket_type = client.ticket_type ticket_id = parsed_args.remote_incident_id if parsed_args.incident_changed: demisto.debug(f'Incident changed: {parsed_args.incident_changed}') if ticket_type == 'sc_task' and parsed_args.inc_status == 2 and params.get('close_ticket'): parsed_args.data['state'] = '3' fields = get_ticket_fields(parsed_args.data, ticket_type=ticket_type) if not params.get('close_ticket'): fields = {key: val for key, val in fields.items() if key != 'closed_at' and key != 'resolved_at'} demisto.debug(f'Sending update request to server {ticket_type}, {ticket_id}, {fields}') result = client.update(ticket_type, ticket_id, fields) demisto.info(f'Ticket Update result {result}') entries = parsed_args.entries if entries: demisto.debug(f'New entries {entries}') for entry in entries: demisto.debug(f'Sending entry {entry.get(""id"")}, type: {entry.get(""type"")}') # Mirroring files as entries if entry.get('type') == 3: path_res = demisto.getFilePath(entry.get('id')) full_file_name = path_res.get('name') file_name, file_extension = os.path.splitext(full_file_name) if not file_extension: file_extension = '' client.upload_file(ticket_id, entry.get('id'), file_name + '_mirrored_from_xsoar' + file_extension, ticket_type) else: # Mirroring comment and work notes as entries tags = entry.get('tags', []) key = '' if params.get('work_notes_tag') in tags: key = 'work_notes' elif params.get('comment_tag') in tags: key = 'comments' user = entry.get('user', 'dbot') text = f""({user}): {str(entry.get('contents', ''))}\n\n Mirrored from Cortex XSOAR"" client.add_comment(ticket_id, ticket_type, key, text) return ticket_id ","def 
update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str: """""" This command pushes local changes to the remote system. Args: client: XSOAR Client to use. args: args['data']: the data to send to the remote system args['entries']: the entries to send to the remote system args['incident_changed']: boolean telling us if the local incident indeed changed or not args['remote_incident_id']: the remote incident id params: entry_tags: the tags to pass to the entries (to separate between comments and work_notes) Returns: The remote incident id - ticket_id """""" parsed_args = UpdateRemoteSystemArgs(args) if parsed_args.delta: demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}') ticket_type = client.ticket_type ticket_id = parsed_args.remote_incident_id if parsed_args.incident_changed: demisto.debug(f'Incident changed: {parsed_args.incident_changed}') if ticket_type == 'sc_task' and parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'): parsed_args.data['state'] = '3' fields = get_ticket_fields(parsed_args.data, ticket_type=ticket_type) if not params.get('close_ticket'): fields = {key: val for key, val in fields.items() if key != 'closed_at' and key != 'resolved_at'} demisto.debug(f'Sending update request to server {ticket_type}, {ticket_id}, {fields}') result = client.update(ticket_type, ticket_id, fields) demisto.info(f'Ticket Update result {result}') entries = parsed_args.entries if entries: demisto.debug(f'New entries {entries}') for entry in entries: demisto.debug(f'Sending entry {entry.get(""id"")}, type: {entry.get(""type"")}') # Mirroring files as entries if entry.get('type') == 3: path_res = demisto.getFilePath(entry.get('id')) full_file_name = path_res.get('name') file_name, file_extension = os.path.splitext(full_file_name) if not file_extension: file_extension = '' client.upload_file(ticket_id, entry.get('id'), file_name + '_mirrored_from_xsoar' + file_extension, ticket_type) else: # Mirroring comment and work notes as entries tags = entry.get('tags', []) key = '' if params.get('work_notes_tag') in tags: key = 'work_notes' elif params.get('comment_tag') in tags: key = 'comments' user = entry.get('user', 'dbot') text = f""({user}): {str(entry.get('contents', ''))}\n\n Mirrored from Cortex XSOAR"" client.add_comment(ticket_id, ticket_type, key, text) return ticket_id " 40720,"def _test_distrib_accumulator_device(device): metric_devices = [torch.device(""cpu"")] if torch.device(device).type != ""xla"": metric_devices.append(idist.device()) for metric_device in metric_devices: ssim = SSIM(data_range=1.0, device=metric_device) assert ssim._device == metric_device assert ssim._kernel.device == metric_device, ""{}:{} vs {}:{}"".format( type(ssim._kernel.device), ssim._kernel.device, type(metric_device), metric_device ) y_pred = torch.rand(1, 3, 12, 12, dtype=torch.float, device=device) y = y_pred * 0.65 ssim.update((y_pred, y)) assert ssim._sum_of_batchwise_ssim.device == metric_device, ""{}:{} vs {}:{}"".format( type(ssim._sum_of_batchwise_ssim.device), ssim._sum_of_batchwise_ssim.device, type(metric_device), metric_device, ) ","def _test_distrib_accumulator_device(device): metric_devices = [torch.device(""cpu"")] if torch.device(device).type != ""xla"": metric_devices.append(idist.device()) for metric_device in metric_devices: ssim = SSIM(data_range=1.0, device=metric_device) assert ssim._device == metric_device assert ssim._kernel.device == metric_device, ""{}:{} vs {}:{}"".format( 
type(ssim._kernel.device), ssim._kernel.device, type(metric_device), metric_device ) y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device) y = y_pred * 0.65 ssim.update((y_pred, y)) assert ssim._sum_of_batchwise_ssim.device == metric_device, ""{}:{} vs {}:{}"".format( type(ssim._sum_of_batchwise_ssim.device), ssim._sum_of_batchwise_ssim.device, type(metric_device), metric_device, ) " 29345,"def main(args: Optional[Sequence[str]]=None) -> None: """"""Runs the script to setup GAE."""""" unused_parsed_args = _PARSER.parse_args(args=args) sys.path.append('.') sys.path.append(common.GOOGLE_APP_ENGINE_SDK_HOME) sys.path.append(os.path.join(common.OPPIA_TOOLS_DIR, 'webtest-3.0.0')) # Delete old *.pyc files. for directory, _, files in os.walk('.'): for file_name in files: if file_name.endswith('.pyc'): filepath = os.path.join(directory, file_name) os.remove(filepath) print( 'Checking whether google-cloud-sdk is installed in %s' % common.GOOGLE_CLOUD_SDK_HOME) if not os.path.exists(common.GOOGLE_CLOUD_SDK_HOME): print('Downloading Google Cloud SDK (this may take a little while)...') os.makedirs(common.GOOGLE_CLOUD_SDK_HOME) try: # If the google cloud version is updated here, the corresponding # lines (GAE_DIR and GCLOUD_PATH) in assets/release_constants.json # should also be updated. urlrequest.urlretrieve( 'https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/' 'google-cloud-sdk-364.0.0-linux-x86_64.tar.gz', filename='gcloud-sdk.tar.gz') except Exception as e: print('Error downloading Google Cloud SDK. Exiting.') raise Exception('Error downloading Google Cloud SDK.') from e print('Download complete. Installing Google Cloud SDK...') tar = tarfile.open(name='gcloud-sdk.tar.gz') tar.extractall( path=os.path.join( common.OPPIA_TOOLS_DIR, 'google-cloud-sdk-364.0.0/')) tar.close() os.remove('gcloud-sdk.tar.gz') # This command installs specific google cloud components for the google # cloud sdk to prevent the need for developers to install it themselves when # the app engine development server starts up. The --quiet parameter # specifically tells the gcloud program to autofill all prompts with default # values. In this case, that means accepting all installations of gcloud # packages. subprocess.call([ common.GCLOUD_PATH, 'components', 'install', 'beta', 'cloud-datastore-emulator', 'app-engine-python', 'app-engine-python-extras', '--quiet']) ","def main(args: Optional[Sequence[str]] = None) -> None: """"""Runs the script to setup GAE."""""" unused_parsed_args = _PARSER.parse_args(args=args) sys.path.append('.') sys.path.append(common.GOOGLE_APP_ENGINE_SDK_HOME) sys.path.append(os.path.join(common.OPPIA_TOOLS_DIR, 'webtest-3.0.0')) # Delete old *.pyc files. for directory, _, files in os.walk('.'): for file_name in files: if file_name.endswith('.pyc'): filepath = os.path.join(directory, file_name) os.remove(filepath) print( 'Checking whether google-cloud-sdk is installed in %s' % common.GOOGLE_CLOUD_SDK_HOME) if not os.path.exists(common.GOOGLE_CLOUD_SDK_HOME): print('Downloading Google Cloud SDK (this may take a little while)...') os.makedirs(common.GOOGLE_CLOUD_SDK_HOME) try: # If the google cloud version is updated here, the corresponding # lines (GAE_DIR and GCLOUD_PATH) in assets/release_constants.json # should also be updated. urlrequest.urlretrieve( 'https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/' 'google-cloud-sdk-364.0.0-linux-x86_64.tar.gz', filename='gcloud-sdk.tar.gz') except Exception as e: print('Error downloading Google Cloud SDK. 
Exiting.') raise Exception('Error downloading Google Cloud SDK.') from e print('Download complete. Installing Google Cloud SDK...') tar = tarfile.open(name='gcloud-sdk.tar.gz') tar.extractall( path=os.path.join( common.OPPIA_TOOLS_DIR, 'google-cloud-sdk-364.0.0/')) tar.close() os.remove('gcloud-sdk.tar.gz') # This command installs specific google cloud components for the google # cloud sdk to prevent the need for developers to install it themselves when # the app engine development server starts up. The --quiet parameter # specifically tells the gcloud program to autofill all prompts with default # values. In this case, that means accepting all installations of gcloud # packages. subprocess.call([ common.GCLOUD_PATH, 'components', 'install', 'beta', 'cloud-datastore-emulator', 'app-engine-python', 'app-engine-python-extras', '--quiet']) " 2594,"def test_label_propagation_closed_form(global_dtype): n_classes = 2 X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0) X = X.astype(global_dtype) y[::3] = -1 Y = np.zeros((len(y), n_classes + 1)) Y[np.arange(len(y)), y] = 1 unlabelled_idx = Y[:, (-1,)].nonzero()[0] labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0] clf = label_propagation.LabelPropagation(max_iter=10000, gamma=0.1) clf.fit(X, y) # adopting notation from Zhu et al 2002 T_bar = clf._build_graph() Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing=""ij""))] Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing=""ij""))] Y = Y[:, :-1] Y_l = Y[labelled_idx, :] Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l) expected = Y.copy() expected[unlabelled_idx, :] = Y_u expected /= expected.sum(axis=1)[:, np.newaxis] assert_allclose(expected, clf.label_distributions_, 4) ","def test_label_propagation_closed_form(global_dtype): n_classes = 2 X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0) X = X.astype(global_dtype) y[::3] = -1 Y = np.zeros((len(y), n_classes + 1)) Y[np.arange(len(y)), y] = 1 unlabelled_idx = Y[:, (-1,)].nonzero()[0] labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0] clf = label_propagation.LabelPropagation(max_iter=10000, gamma=0.1) clf.fit(X, y) # adopting notation from Zhu et al 2002 T_bar = clf._build_graph() Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing=""ij""))] Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing=""ij""))] Y = Y[:, :-1] Y_l = Y[labelled_idx, :] Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l) expected = Y.copy() expected[unlabelled_idx, :] = Y_u expected /= expected.sum(axis=1)[:, np.newaxis] assert_allclose(expected, clf.label_distributions_, atol=1e-4) " 31587,"def get_whois_history_command(client, args): hostname = args.get('hostname') page = int(args.get('page', 1)) res = client.get_whois_history(hostname=hostname, page=page) readable_output = tableToMarkdown(f""WHOIS history for {hostname}:"", res.get('items')) command_results = CommandResults( outputs_prefix=f""SecurityTrails.Domain"", outputs_key_field=""name"", outputs={ ""name"": hostname, f""whois_history"": res.get('items', []), ""whois_history_count"": res.get('count', 0) }, readable_output=readable_output ) return_results(command_results) domain_data = { ""Name"": hostname } contacts = res.get('items', [])[0].get('contact') if res.get('items', None) else [] admin_contact = [x for x in contacts if x.get('type', None) == ""administrativeContact""] admin_contact = admin_contact[0] if admin_contact else None 
registrant_contact = [x for x in contacts if x.get('type', None) == ""registrant""] registrant_contact = registrant_contact[0] if registrant_contact else None registrar_contact = admin_contact if admin_contact else None whois_objects = list() for x in res.get('items', []): whois_object = { ""DomainStatus"": "", "".join(x.get('status', [])), ""NameServers"": "", "".join(x.get('nameServers', [])), ""CreationDate"": datetime.fromtimestamp((x.get('createdDate') / 1000)).strftime(""%Y-%m-%dT%H:%M:%SZ"") if x.get('createdDate', None) else None, ""UpdatedDate"": datetime.fromtimestamp((x.get('updatedDate') / 1000)).strftime(""%Y-%m-%dT%H:%M:%SZ"") if x.get('updatedDate', None) else None, ""ExpirationDate"": datetime.fromtimestamp((x.get('expiresDate') / 1000)).strftime(""%Y-%m-%dT%H:%M:%SZ"") if x.get('expiresDate', None) else None } if admin_contact: whois_object['Admin'] = { ""Name"": admin_contact.get('name'), ""Email"": admin_contact.get('email'), ""Phone"": admin_contact.get('telephone') } if registrant_contact: whois_object['Registrant'] = { ""Name"": registrant_contact.get('name'), ""Email"": registrant_contact.get('email'), ""Phone"": registrant_contact.get('telephone') } if registrar_contact: whois_object['Registrar'] = { ""Name"": registrar_contact.get('name'), ""Email"": registrar_contact.get('email'), ""Phone"": registrar_contact.get('telephone') } whois_objects.append(whois_object) if len(whois_objects) > 0: domain_data['WHOIS/History'] = whois_objects create_standard_domain_context(domain_data=domain_data) ","def get_whois_history_command(client, args): hostname = args.get('hostname') page = int(args.get('page', 1)) res = client.get_whois_history(hostname=hostname, page=page) readable_output = tableToMarkdown(f""WHOIS history for {hostname}:"", res.get('items')) command_results = CommandResults( outputs_prefix=f""SecurityTrails.Domain"", outputs_key_field=""name"", outputs={ ""name"": hostname, ""whois_history"": res.get('items', []), ""whois_history_count"": res.get('count', 0) }, readable_output=readable_output ) return_results(command_results) domain_data = { ""Name"": hostname } contacts = res.get('items', [])[0].get('contact') if res.get('items', None) else [] admin_contact = [x for x in contacts if x.get('type', None) == ""administrativeContact""] admin_contact = admin_contact[0] if admin_contact else None registrant_contact = [x for x in contacts if x.get('type', None) == ""registrant""] registrant_contact = registrant_contact[0] if registrant_contact else None registrar_contact = admin_contact if admin_contact else None whois_objects = list() for x in res.get('items', []): whois_object = { ""DomainStatus"": "", "".join(x.get('status', [])), ""NameServers"": "", "".join(x.get('nameServers', [])), ""CreationDate"": datetime.fromtimestamp((x.get('createdDate') / 1000)).strftime(""%Y-%m-%dT%H:%M:%SZ"") if x.get('createdDate', None) else None, ""UpdatedDate"": datetime.fromtimestamp((x.get('updatedDate') / 1000)).strftime(""%Y-%m-%dT%H:%M:%SZ"") if x.get('updatedDate', None) else None, ""ExpirationDate"": datetime.fromtimestamp((x.get('expiresDate') / 1000)).strftime(""%Y-%m-%dT%H:%M:%SZ"") if x.get('expiresDate', None) else None } if admin_contact: whois_object['Admin'] = { ""Name"": admin_contact.get('name'), ""Email"": admin_contact.get('email'), ""Phone"": admin_contact.get('telephone') } if registrant_contact: whois_object['Registrant'] = { ""Name"": registrant_contact.get('name'), ""Email"": registrant_contact.get('email'), ""Phone"": registrant_contact.get('telephone') } 
if registrar_contact: whois_object['Registrar'] = { ""Name"": registrar_contact.get('name'), ""Email"": registrar_contact.get('email'), ""Phone"": registrar_contact.get('telephone') } whois_objects.append(whois_object) if len(whois_objects) > 0: domain_data['WHOIS/History'] = whois_objects create_standard_domain_context(domain_data=domain_data) " 58855,"def isneginf(x, out=None): """"""Returns a bool array, where True if input element is negative infinity. Args: x (cupy.ndarray): Input array. Returns: cupy.ndarray: Boolean array of same shape as ``x``. Examples -------- >>> cupy.isneginf(0) False >>> cupy.isneginf([4, -4, numpy.inf, -numpy.inf]) [False, False, False, True] .. seealso:: :func:`numpy.isneginf` """""" is_inf = cupy.isinf(x) try: signbit = cupy.signbit(x) except TypeError as e: dtype = cupy.asanyarray(x).dtype raise TypeError(f'This operation is not supported for {dtype} values ' 'as it would be uncertain.') from e else: return cupy.logical_and(is_inf, signbit, out=None) ","def isneginf(x, out=None): """"""Test element-wise for negative infinity, return result as bool array. Args: x (cupy.ndarray): Input array. Returns: cupy.ndarray: Boolean array of same shape as ``x``. Examples -------- >>> cupy.isneginf(0) False >>> cupy.isneginf([4, -4, numpy.inf, -numpy.inf]) [False, False, False, True] .. seealso:: :func:`numpy.isneginf` """""" is_inf = cupy.isinf(x) try: signbit = cupy.signbit(x) except TypeError as e: dtype = cupy.asanyarray(x).dtype raise TypeError(f'This operation is not supported for {dtype} values ' 'as it would be uncertain.') from e else: return cupy.logical_and(is_inf, signbit, out=None) " 31364,"def archiving_reasons_command(client: Client) -> CommandResults: """"""Get archiving reasons list from TOPdesk"""""" archiving_reasons = client.get_list(""/archiving-reasons"") return command_with_all_fields_readable_list(results=archiving_reasons, result_name='archiving reasons', output_prefix='archive_reason', outputs_key_field='id') ","def archiving_reasons_command(client: Client) -> CommandResults: """"""Get archiving reasons list from TOPdesk"""""" archiving_reasons = client.get_list(""/archiving-reasons"") return command_with_all_fields_readable_list(results=archiving_reasons, result_name='archiving reasons', output_prefix='ArchiveReason', outputs_key_field='id') " 58923,"def check_iterator( file_iterator, licdebug_file, timeout=30, notify_at_second=5, verbose=False ): """"""Loop over iterator"""""" max_time = time.time() + timeout notification_time = time.time() + notify_at_second notification_bool = True while time.time() < max_time: if time.time() > notification_time and notification_bool: print( ""PyMAPDL is taking more time than expected to connect to an MAPDL session.\n"" ""Checking if there are available licenses to connect to..."" ) notification_bool = False msg = next(file_iterator) if msg: LOG.info(msg) if verbose: print(f""Output from {licdebug_file}:\n{msg}"") if ""DENIED"" in msg: # read to the end of the file time.sleep(0.05) # permit flush messages = [msg] while True: msg = next(file_iterator).strip() if not msg: break messages.append(msg) raise LicenseServerConnectionError(""\n"".join(messages)) if ""CHECKOUT"" in msg: # successful license checkout return True raise TimeoutError( f""Exceeded timeout of {timeout} seconds while examining:\n{licdebug_file}"" ) ","def check_iterator( file_iterator, licdebug_file, timeout=30, notify_at_second=5, verbose=False ): """"""Loop over iterator"""""" max_time = time.time() + timeout notification_time = 
time.time() + notify_at_second notification_bool = True while time.time() < max_time: if time.time() > notification_time and notification_bool: print( ""PyMAPDL is taking longer than expected to connect to an MAPDL session.\n"" ""Checking if there are any available licenses..."" ) notification_bool = False msg = next(file_iterator) if msg: LOG.info(msg) if verbose: print(f""Output from {licdebug_file}:\n{msg}"") if ""DENIED"" in msg: # read to the end of the file time.sleep(0.05) # permit flush messages = [msg] while True: msg = next(file_iterator).strip() if not msg: break messages.append(msg) raise LicenseServerConnectionError(""\n"".join(messages)) if ""CHECKOUT"" in msg: # successful license checkout return True raise TimeoutError( f""Exceeded timeout of {timeout} seconds while examining:\n{licdebug_file}"" ) " 30643,"def parse_outputs( api_res: Dict, meta_fields: list = [], quota_fields: list = [], resources_fields: list = [], sandbox_filds: list = [] ) -> Dict[str, dict]: """"""Parse group data as received from CrowdStrike FalconX API into Demisto's conventions the output from the API is a dict that contains the keys: meta, resources and errors the meta contains a ""quota"" dict the ""resources"" is an array that contains the sandbox dict the function filters the wanted params from the api result :param api_res: the api result from the http request :param meta_fields: the wanted params that appear in the mate section :param quota_fields: the wanted params that appear in the quota section :param resources_fields: the wanted params that appear in the resources section :param sandbox_filds: the wanted params that appear in the sandbox section :return: a dict based on api_res with the wanted params only """""" if api_res.get(""errors""): # if there is an error in the api result, return only the error return api_res.get(""errors"") api_res_meta, api_res_quota, api_res_resources, api_res_sandbox = {}, {}, {}, {} resources_group_outputs, sandbox_group_outputs = {}, {} api_res_meta = api_res.get(""meta"") if api_res_meta: api_res_quota = api_res_meta.get(""quota"") meta_group_outputs = add_outputs_from_dict(api_res_meta, meta_fields) quota_group_outputs = add_outputs_from_dict(api_res_quota, quota_fields) if api_res.get(""resources""): # depended on the command, the resources section can be a str list or a list that contains # only one argument which is a dict if type(api_res.get(""resources"")[0]) == dict: api_res_resources = api_res.get(""resources"")[0] resources_group_outputs = add_outputs_from_dict(api_res_resources, resources_fields) if api_res_resources and api_res_resources.get(""sandbox""): api_res_sandbox = api_res_resources.get(""sandbox"")[0] sandbox_group_outputs = add_outputs_from_dict(api_res_sandbox, sandbox_filds) else: # the resources section is a list of strings resources_group_outputs = {""resources"": api_res.get(""resources"")} merged_dicts = {**meta_group_outputs, **quota_group_outputs, **resources_group_outputs, **sandbox_group_outputs} return {f'csfalconx.resource(val.resource === obj.resource)': merged_dicts} ","def parse_outputs( api_res: Dict, meta_fields: list = [], quota_fields: list = [], resources_fields: list = [], sandbox_filds: list = [] ) -> Dict[str, dict]: """"""Parse group data as received from CrowdStrike FalconX API into Demisto's conventions the output from the API is a dict that contains the keys: meta, resources and errors the meta contains a ""quota"" dict the ""resources"" is an array that contains the sandbox dict the function filters the wanted 
params from the api result :param api_res: the api result from the http request :param meta_fields: the wanted params that appear in the mate section :param quota_fields: the wanted params that appear in the quota section :param resources_fields: the wanted params that appear in the resources section :param sandbox_filds: the wanted params that appear in the sandbox section :return: a dict based on api_res with the wanted params only """""" if api_res.get(""errors""): # if there is an error in the api result, return only the error return api_res.get(""errors"") api_res_meta, api_res_quota, api_res_resources, api_res_sandbox = {}, {}, {}, {} resources_group_outputs, sandbox_group_outputs = {}, {} api_res_meta = api_res.get(""meta"") if api_res_meta: api_res_quota = api_res_meta.get(""quota"") meta_group_outputs = add_outputs_from_dict(api_res_meta, meta_fields) quota_group_outputs = add_outputs_from_dict(api_res_quota, quota_fields) if api_res.get(""resources""): # depended on the command, the resources section can be a str list or a list that contains # only one argument which is a dict if type(api_res.get(""resources"")[0]) == dict: api_res_resources = api_res.get(""resources"")[0] resources_group_outputs = add_outputs_from_dict(api_res_resources, resources_fields) if api_res_resources and api_res_resources.get(""sandbox""): api_res_sandbox = api_res_resources.get(""sandbox"")[0] sandbox_group_outputs = add_outputs_from_dict(api_res_sandbox, sandbox_filds) else: # the resources section is a list of strings resources_group_outputs = {""resources"": api_res.get(""resources"")} merged_dicts = {**meta_group_outputs, **quota_group_outputs, **resources_group_outputs, **sandbox_group_outputs} return {f'csfalconx.resource(val.id === obj.id)': merged_dicts} " 57759,"def do_deception_host_command(client, args): results = None error = None try: host = args.get('host') res = client.call_acal_api(action=Action.DECEPTION_HOST, host=host) if 200 == res.status_code \ and 'result' in res.json() \ and type(res.json()['result']) is bool: out_result = { 'is_deception': res.json()['result'], 'host': str(host) } results = CommandResults( outputs_prefix='Acalvio.IsDeceptionHost', outputs_key_field=['is_deception', 'host'], outputs=out_result, readable_output=tableToMarkdown ('Acalvio ShadowPlex - Deception Host', out_result), raw_response=res.json() ) else: error = get_acal_error(res) except DemistoException as de: raise Exception(de) return (results, error) # end of function - do_deception_host_command ","def do_deception_host_command(client, args): results = None error = None try: host = args.get('host') res = client.call_acal_api(action=Action.DECEPTION_HOST, host=host) if 200 == res.status_code \ and 'result' in res.json() \ and type(res.json()['result']) is bool: out_result = { 'IsDeception': res.json()['result'], 'Host': str(host) } results = CommandResults( outputs_prefix='Acalvio.IsDeceptionHost', outputs_key_field=['is_deception', 'host'], outputs=out_result, readable_output=tableToMarkdown ('Acalvio ShadowPlex - Deception Host', out_result), raw_response=res.json() ) else: error = get_acal_error(res) except DemistoException as de: raise Exception(de) return (results, error) # end of function - do_deception_host_command " 838,"def _charpoly(M, x='lambda', simplify=_simplify): """"""Computes characteristic polynomial det(x*I - M) where I is the identity matrix. 
A PurePoly is returned, so using different variables for ``x`` does not affect the comparison or the polynomials: Parameters ========== x : string, optional Name for the ""lambda"" variable, defaults to ""lambda"". simplify : function, optional Simplification function to use on the characteristic polynomial calculated. Defaults to ``simplify``. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y >>> M = Matrix([[1, 3], [2, 0]]) >>> M.charpoly() PurePoly(lambda**2 - lambda - 6, lambda, domain='ZZ') >>> M.charpoly(x) == M.charpoly(y) True >>> M.charpoly(x) == M.charpoly(y) True Specifying ``x`` is optional; a symbol named ``lambda`` is used by default (which looks good when pretty-printed in unicode): >>> M.charpoly().as_expr() lambda**2 - lambda - 6 And if ``x`` clashes with an existing symbol, underscores will be prepended to the name to make it unique: >>> M = Matrix([[1, 2], [x, 0]]) >>> M.charpoly(x).as_expr() _x**2 - _x - 2*x Whether you pass a symbol or not, the generator can be obtained with the gen attribute since it may not be the same as the symbol that was passed: >>> M.charpoly(x).gen _x >>> M.charpoly(x).gen == x False Notes ===== The Samuelson-Berkowitz algorithm is used to compute the characteristic polynomial efficiently and without any division operations. Thus the characteristic polynomial over any commutative ring without zero divisors can be computed. If the determinant det(x*I - M) can be found out easily as in the case of an upper or a lower triangular matrix, then instead of Samuelson-Berkowitz algorithm, eigenvalues are computed and the characteristic polynomial with their help. See Also ======== det """""" if not M.is_square: raise NonSquareMatrixError() if M.is_lower or M.is_upper: diagonal_elements = M.diagonal() x = _uniquely_named_symbol(x, diagonal_elements) m = 1 for i in diagonal_elements: m = m * (x - simplify(i)) return PurePoly(m,x) berk_vector = _berkowitz_vector(M) x = _uniquely_named_symbol(x, berk_vector) return PurePoly([simplify(a) for a in berk_vector], x) ","def _charpoly(M, x='lambda', simplify=_simplify): """"""Computes characteristic polynomial det(x*I - M) where I is the identity matrix. A PurePoly is returned, so using different variables for ``x`` does not affect the comparison or the polynomials: Parameters ========== x : string, optional Name for the ""lambda"" variable, defaults to ""lambda"". simplify : function, optional Simplification function to use on the characteristic polynomial calculated. Defaults to ``simplify``. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y >>> M = Matrix([[1, 3], [2, 0]]) >>> M.charpoly() PurePoly(lambda**2 - lambda - 6, lambda, domain='ZZ') >>> M.charpoly(x) == M.charpoly(y) True >>> M.charpoly(x) == M.charpoly(y) True Specifying ``x`` is optional; a symbol named ``lambda`` is used by default (which looks good when pretty-printed in unicode): >>> M.charpoly().as_expr() lambda**2 - lambda - 6 And if ``x`` clashes with an existing symbol, underscores will be prepended to the name to make it unique: >>> M = Matrix([[1, 2], [x, 0]]) >>> M.charpoly(x).as_expr() _x**2 - _x - 2*x Whether you pass a symbol or not, the generator can be obtained with the gen attribute since it may not be the same as the symbol that was passed: >>> M.charpoly(x).gen _x >>> M.charpoly(x).gen == x False Notes ===== The Samuelson-Berkowitz algorithm is used to compute the characteristic polynomial efficiently and without any division operations. 
Thus the characteristic polynomial over any commutative ring without zero divisors can be computed. If the determinant det(x*I - M) can be found out easily as in the case of an upper or a lower triangular matrix, then instead of Samuelson-Berkowitz algorithm, eigenvalues are computed and the characteristic polynomial with their help. See Also ======== det """""" if not M.is_square: raise NonSquareMatrixError() if M.is_lower or M.is_upper: diagonal_elements = M.diagonal() x = _uniquely_named_symbol(x, diagonal_elements) m = 1 for i in diagonal_elements: m = m * (x - simplify(i)) return PurePoly(m, x) berk_vector = _berkowitz_vector(M) x = _uniquely_named_symbol(x, berk_vector) return PurePoly([simplify(a) for a in berk_vector], x) " 14139,"def network_for_netcode(symbol): symbol = symbol.upper() netcode = symbol.lower() for prefix in search_prefixes(): try: module = importlib.import_module(""%s.%s"" % (prefix, netcode)) if module.network.symbol.upper() == symbol: module.symbol = symbol return module.network except (AttributeError, ImportError): return raise ValueError(""no network with symbol %s found"" % netcode) ","def network_for_netcode(symbol): symbol = symbol.upper() netcode = symbol.lower() for prefix in search_prefixes(): try: module = importlib.import_module(""%s.%s"" % (prefix, netcode)) if module.network.symbol.upper() == symbol: module.symbol = symbol return module.network except (AttributeError, ImportError): pass raise ValueError(""no network with symbol %s found"" % netcode) " 39997,"def _to_fixed_length_strings(value: np.ndarray) -> np.ndarray: """""" Convert variable length strings to fixed length. Currently a workaround for https://github.com/zarr-developers/zarr-python/pull/422 """""" new_dtype = [] for dt_name, (dt_type, _) in value.dtype.fields.items(): if dt_type.str[1] in (""U"", ""O""): new_dtype.append((dt_name, ""U200"")) else: new_dtype.append((dt_name, dt_type)) return value.astype(new_dtype) ","def _to_fixed_length_strings(value: np.ndarray) -> np.ndarray: """""" Convert variable length strings to fixed length. Currently a workaround for https://github.com/zarr-developers/zarr-python/pull/422 """""" new_dtype = [] for dt_name, (dt_type, _) in value.dtype.fields.items(): if dt_type.str[1] in (""U"", ""O""): new_dtype.append((dt_name, np.unicode)) else: new_dtype.append((dt_name, dt_type)) return value.astype(new_dtype) " 59605,"def main() -> None: parser = argparse.ArgumentParser( description=""Build wheels for all the platforms."", epilog="""""" Most options are supplied via environment variables or in --config-file (pyproject.toml usually). See https://github.com/pypa/cibuildwheel#options for info. """""", ) parser.add_argument( ""--platform"", choices=[""auto"", ""linux"", ""macos"", ""windows""], default=os.environ.get(""CIBW_PLATFORM"", ""auto""), help="""""" Platform to build for. Use this option to override the auto-detected platform or to run cibuildwheel on your development machine. Specifying ""macos"" or ""windows"" only works on that operating system, but ""linux"" works on all three, as long as Docker is installed. Default: auto. """""", ) arch_list_str = "", "".join(a.name for a in Architecture) parser.add_argument( ""--archs"", default=None, help=f"""""" Comma-separated list of CPU architectures to build for. When set to 'auto', builds the architectures natively supported on this machine. Set this option to build an architecture via emulation, for example, using binfmt_misc and QEMU. Default: auto. 
Choices: auto, auto64, auto32, native, all, {arch_list_str} """""", ) parser.add_argument( ""--output-dir"", type=Path, help=""Destination folder for the wheels. Default: wheelhouse."", ) parser.add_argument( ""--config-file"", default="""", help="""""" TOML config file. Default: """", meaning {package}/pyproject.toml, if it exists. To refer to a project inside your project, use {package}; this matters if you build from an SDist. """""", ) parser.add_argument( ""package_dir"", default=Path("".""), type=Path, nargs=""?"", help="""""" Path to the package that you want wheels for. Must be a subdirectory of the working directory. When set to a directory, the working directory is still considered the 'project' and is copied into the Docker container on Linux. Default: the working directory. This can also be a tar.gz file - if it is, then --config-file and --output-dir are relative to the current directory, and other paths are relative to the expanded SDist directory. """""", ) parser.add_argument( ""--print-build-identifiers"", action=""store_true"", help=""Print the build identifiers matched by the current invocation and exit."", ) parser.add_argument( ""--allow-empty"", action=""store_true"", help=""Do not report an error code if the build does not match any wheels."", ) parser.add_argument( ""--prerelease-pythons"", action=""store_true"", help=""Enable pre-release Python versions if available."", ) args = parser.parse_args(namespace=CommandLineArguments()) args.package_dir = args.package_dir.resolve() # This are always relative to the base directory, even in SDist builds args.output_dir = Path( args.output_dir if args.output_dir is not None else os.environ.get(""CIBW_OUTPUT_DIR"", ""wheelhouse"") ).resolve() # Standard builds if a directory or non-existent path is given if not args.package_dir.is_file() and not args.package_dir.name.endswith(""tar.gz""): build_in_directory(args) return # Tarfile builds require extraction and changing the directory with tempfile.TemporaryDirectory(prefix=""cibw-sdist-"") as temp_dir_str: temp_dir = Path(temp_dir_str) with tarfile.open(args.package_dir) as tar: tar.extractall(path=temp_dir) # The extract directory is now the project dir try: (project_dir,) = temp_dir.iterdir() except ValueError: raise SystemExit(""invalid sdist: didn't contain a single dir"") from None # This is now the new package dir args.package_dir = project_dir.resolve() with chdir(temp_dir): build_in_directory(args) ","def main() -> None: parser = argparse.ArgumentParser( description=""Build wheels for all the platforms."", epilog="""""" Most options are supplied via environment variables or in --config-file (pyproject.toml usually). See https://github.com/pypa/cibuildwheel#options for info. """""", ) parser.add_argument( ""--platform"", choices=[""auto"", ""linux"", ""macos"", ""windows""], default=os.environ.get(""CIBW_PLATFORM"", ""auto""), help="""""" Platform to build for. Use this option to override the auto-detected platform or to run cibuildwheel on your development machine. Specifying ""macos"" or ""windows"" only works on that operating system, but ""linux"" works on all three, as long as Docker is installed. Default: auto. """""", ) arch_list_str = "", "".join(a.name for a in Architecture) parser.add_argument( ""--archs"", default=None, help=f"""""" Comma-separated list of CPU architectures to build for. When set to 'auto', builds the architectures natively supported on this machine. 
Set this option to build an architecture via emulation, for example, using binfmt_misc and QEMU. Default: auto. Choices: auto, auto64, auto32, native, all, {arch_list_str} """""", ) parser.add_argument( ""--output-dir"", type=Path, help=""Destination folder for the wheels. Default: wheelhouse."", ) parser.add_argument( ""--config-file"", default="""", help="""""" TOML config file. Default: """", meaning {package}/pyproject.toml, if it exists. To refer to a project inside your project, use {package}; this matters if you build from an SDist. """""", ) parser.add_argument( ""package_dir"", default=Path("".""), type=Path, nargs=""?"", help="""""" Path to the package that you want wheels for. Default: the working directory. Can be a directory inside the working directory, or an sdist. When set to a directory, the working directory is still considered the 'project' and is copied into the Docker container on Linux. When set to a tar.gz sdist file, --config-file and --output-dir are relative to the current directory, and other paths are relative to the expanded SDist directory. """""", ) parser.add_argument( ""--print-build-identifiers"", action=""store_true"", help=""Print the build identifiers matched by the current invocation and exit."", ) parser.add_argument( ""--allow-empty"", action=""store_true"", help=""Do not report an error code if the build does not match any wheels."", ) parser.add_argument( ""--prerelease-pythons"", action=""store_true"", help=""Enable pre-release Python versions if available."", ) args = parser.parse_args(namespace=CommandLineArguments()) args.package_dir = args.package_dir.resolve() # This are always relative to the base directory, even in SDist builds args.output_dir = Path( args.output_dir if args.output_dir is not None else os.environ.get(""CIBW_OUTPUT_DIR"", ""wheelhouse"") ).resolve() # Standard builds if a directory or non-existent path is given if not args.package_dir.is_file() and not args.package_dir.name.endswith(""tar.gz""): build_in_directory(args) return # Tarfile builds require extraction and changing the directory with tempfile.TemporaryDirectory(prefix=""cibw-sdist-"") as temp_dir_str: temp_dir = Path(temp_dir_str) with tarfile.open(args.package_dir) as tar: tar.extractall(path=temp_dir) # The extract directory is now the project dir try: (project_dir,) = temp_dir.iterdir() except ValueError: raise SystemExit(""invalid sdist: didn't contain a single dir"") from None # This is now the new package dir args.package_dir = project_dir.resolve() with chdir(temp_dir): build_in_directory(args) " 53870,"def is_os_disk(DiskCreateOption, disk_create_option, os_type): if disk_create_option == DiskCreateOption.from_image or disk_create_option in \ [DiskCreateOption.import_enum, DiskCreateOption.upload, DiskCreateOption.empty] and os_type: return True return False ","def is_os_disk(DiskCreateOption, disk_create_option, os_type): if disk_create_option == DiskCreateOption.from_image or (disk_create_option in \ [DiskCreateOption.import_enum, DiskCreateOption.upload, DiskCreateOption.empty] and os_type): return True return False " 39935,"def select_stake(staker: Staker, emitter: StdoutEmitter, stakes_status: Stake.Status = Stake.Status.EDITABLE, filter_function: Callable[[Stake], bool] = None ) -> Stake: """"""Interactively select a stake or abort if there are no eligible stakes."""""" if stakes_status.is_child(Stake.Status.DIVISIBLE): emitter.echo(ONLY_DISPLAYING_DIVISIBLE_STAKES_NOTE, color='yellow') # Filter stakes by status stakes = 
staker.sorted_stakes(parent_status=stakes_status, filter_function=filter_function) if not stakes: emitter.echo(NO_STAKES_FOUND, color='red') raise click.Abort # Interactive Selection paint_unlocked = stakes_status is Stake.Status.INACTIVE paint_stakes(staker=staker, emitter=emitter, stakes=stakes, paint_unlocked=paint_unlocked) indexed_stakes = {stake.index: stake for stake in stakes} indices = [str(index) for index in indexed_stakes.keys()] choice = click.prompt(SELECT_STAKE, type=click.Choice(indices)) chosen_stake = indexed_stakes[int(choice)] return chosen_stake ","def select_stake(staker: Staker, emitter: StdoutEmitter, stakes_status: Stake.Status = Stake.Status.EDITABLE, filter_function: Callable[[Stake], bool] = None ) -> Stake: """"""Interactively select a stake or abort if there are no eligible stakes."""""" if stakes_status.is_child(Stake.Status.DIVISIBLE): emitter.echo(ONLY_DISPLAYING_DIVISIBLE_STAKES_NOTE, color='yellow') # Filter stakes by status stakes = staker.sorted_stakes(parent_status=stakes_status, filter_function=filter_function) if not stakes: emitter.echo(NO_STAKES_FOUND, color='red') raise click.Abort # Interactive Selection paint_unlocked = stakes_status.is_child(Stake.Status.UNLOCKED) paint_stakes(staker=staker, emitter=emitter, stakes=stakes, paint_unlocked=paint_unlocked) indexed_stakes = {stake.index: stake for stake in stakes} indices = [str(index) for index in indexed_stakes.keys()] choice = click.prompt(SELECT_STAKE, type=click.Choice(indices)) chosen_stake = indexed_stakes[int(choice)] return chosen_stake " 9827,"def remove_initiator(module, array, ini): changed = False if module.check_mode: module.exit_json(changed=changed) try: ini_id = ini['id'] ok = array.remove_initiator( ini_id) if ok: module.log(msg='Initiator {0} removed.'.format(ini_id)) changed = True else: module.fail_json(msg='Initiator {0} remove failed.'.format(ini_id)) except Exception: pass module.exit_json(changed=changed) ","def remove_initiator(module, array, ini): changed = False if module.check_mode: module.exit_json(changed=changed) try: ini_id = ini['id'] ok = array.remove_initiator( ini_id) if ok: module.log(msg='Initiator {0} removed.'.format(ini_id)) changed = True else: raise Exception except Exception: pass module.exit_json(changed=changed) " 42064,"def test_plot_parallel_coordinate() -> None: # Test with no trial. study = create_study() figure = plot_parallel_coordinate(study) assert len(list(figure.get_figure().axes)) == 0 + 1 study = prepare_study_with_trials(with_c_d=False) # Test with a trial. figure = plot_parallel_coordinate(study) assert len(list(figure.get_figure().axes)) == 3 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Objective Value"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] assert fig.axes[3].get_ylim() == (0.0, 2.0) assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0] # Test with a trial to select parameter. 
figure = plot_parallel_coordinate(study, params=[""param_a""]) assert len(list(figure.get_figure().axes)) == 2 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Objective Value"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] # Test with a customized target value. with pytest.warns(UserWarning): figure = plot_parallel_coordinate( study, params=[""param_a""], target=lambda t: t.params[""param_b""] ) assert len(list(figure.get_figure().axes)) == 2 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Objective Value"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [2.0, 0.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] # Test with a customized target name. figure = plot_parallel_coordinate(study, target_name=""Target Name"") assert len(list(figure.get_figure().axes)) == 3 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Target Name"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] assert fig.axes[3].get_ylim() == (0.0, 2.0) assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0] # Test with wrong params that do not exist in trials with pytest.raises(ValueError, match=""Parameter optuna does not exist in your study.""): plot_parallel_coordinate(study, params=[""optuna"", ""optuna""]) # Ignore failed trials. def fail_objective(_: Trial) -> float: raise ValueError study = create_study() study.optimize(fail_objective, n_trials=1, catch=(ValueError,)) figure = plot_parallel_coordinate(study) assert len(figure.get_lines()) == 0 ","def test_plot_parallel_coordinate() -> None: # Test with no trial. study = create_study() figure = plot_parallel_coordinate(study) assert len(figure.get_figure().axes) == 0 + 1 study = prepare_study_with_trials(with_c_d=False) # Test with a trial. figure = plot_parallel_coordinate(study) assert len(list(figure.get_figure().axes)) == 3 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Objective Value"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] assert fig.axes[3].get_ylim() == (0.0, 2.0) assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0] # Test with a trial to select parameter. 
figure = plot_parallel_coordinate(study, params=[""param_a""]) assert len(list(figure.get_figure().axes)) == 2 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Objective Value"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] # Test with a customized target value. with pytest.warns(UserWarning): figure = plot_parallel_coordinate( study, params=[""param_a""], target=lambda t: t.params[""param_b""] ) assert len(list(figure.get_figure().axes)) == 2 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Objective Value"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [2.0, 0.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] # Test with a customized target name. figure = plot_parallel_coordinate(study, target_name=""Target Name"") assert len(list(figure.get_figure().axes)) == 3 + 1 fig = figure.get_figure() assert fig.axes[1].get_ylabel() == ""Target Name"" assert fig.axes[1].get_ylim() == (0.0, 2.0) assert len(figure.findobj(LineCollection)) == 1 assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0] assert fig.axes[2].get_ylim() == (1.0, 2.5) assert [ fig.axes[2].get_lines()[0].get_ydata()[0], fig.axes[2].get_lines()[0].get_ydata()[-1], ] == [1.0, 2.5] assert fig.axes[3].get_ylim() == (0.0, 2.0) assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0] # Test with wrong params that do not exist in trials with pytest.raises(ValueError, match=""Parameter optuna does not exist in your study.""): plot_parallel_coordinate(study, params=[""optuna"", ""optuna""]) # Ignore failed trials. def fail_objective(_: Trial) -> float: raise ValueError study = create_study() study.optimize(fail_objective, n_trials=1, catch=(ValueError,)) figure = plot_parallel_coordinate(study) assert len(figure.get_lines()) == 0 " 39398,"def test_pyvistandarray_assign(sphere): sphere.point_data['data'] = range(sphere.n_points) # this might leave a reference behind if we don't property use the pointer # to the vtk array. sphere.point_data['data'] = sphere.point_data['data'] ","def test_pyvistandarray_assign(sphere): sphere.point_data['data'] = range(sphere.n_points) # this might leave a reference behind if we don't properly use the pointer # to the vtk array. sphere.point_data['data'] = sphere.point_data['data'] " 39402,"def test_polygon(): geom = pyvista.Polygon() assert np.any(geom.points) geom1 = pyvista.Polygon(generate_polygon=True) assert geom.n_cells == 2 geom2 = pyvista.Polygon(generate_polygon=False) assert geom.n_cells == 1 ","def test_polygon(): geom = pyvista.Polygon() assert np.any(geom.points) geom1 = pyvista.Polygon(generate_polygon=True) assert geom1.n_cells == 2 geom2 = pyvista.Polygon(generate_polygon=False) assert geom.n_cells == 1 " 43566,"def SqueezingEmbedding(features, wires, method='amplitude', c=0.1): r""""""Encodes :math:`N` features into the squeezing amplitudes :math:`r \geq 0` or phases :math:`\phi \in [0, 2\pi)` of :math:`M` modes, where :math:`N\leq M`. 
The mathematical definition of the squeezing gate is given by the operator .. math:: S(z) = \exp\left(\frac{r}{2}\left(e^{-i\phi}\a^2 -e^{i\phi}{\ad}^{2} \right) \right), where :math:`\a` and :math:`\ad` are the bosonic creation and annihilation operators. ``features`` has to be an array of at most ``len(wires)`` floats. If there are fewer entries in ``features`` than wires, the circuit does not apply the remaining squeezing gates. Args: features (array): Array of features of size (N,) wires (Sequence[int]): sequence of mode indices that the template acts on Keyword Args: method (str): ``'phase'`` encodes the input into the phase of single-mode squeezing, while ``'amplitude'`` uses the amplitude c (float): value of the phase of all squeezing gates if ``execution='amplitude'``, or the amplitude of all squeezing gates if ``execution='phase'`` Raises: ValueError: if `features` or `wires` is invalid """""" if not isinstance(wires, Iterable): raise ValueError(""Wires needs to be a list of wires that the embedding uses; got {}."".format(wires)) if len(wires) < len(features): raise ValueError(""Number of features to embed cannot be larger than number of wires, which is {}; "" ""got {}."".format(len(wires), len(features))) for idx, f in enumerate(features): if method == 'amplitude': Squeezing(f, c, wires=wires[idx]) elif method == 'phase': Squeezing(c, f, wires=wires[idx]) else: raise ValueError(""Execution method '{}' not known. Has to be 'phase' or 'amplitude'."".format(method)) ","def SqueezingEmbedding(features, wires, method='amplitude', c=0.1): r""""""Encodes :math:`N` features into the squeezing amplitudes :math:`r \geq 0` or phases :math:`\phi \in [0, 2\pi)` of :math:`M` modes, where :math:`N\leq M`. The mathematical definition of the squeezing gate is given by the operator .. math:: S(z) = \exp\left(\frac{r}{2}\left(e^{-i\phi}\a^2 -e^{i\phi}{\ad}^{2} \right) \right), where :math:`\a` and :math:`\ad` are the bosonic creation and annihilation operators. ``features`` has to be an array of at most ``len(wires)`` floats. If there are fewer entries in ``features`` than wires, the circuit does not apply the remaining squeezing gates. Args: features (array): Array of features of size (N,) wires (Sequence[int]): sequence of mode indices that the template acts on Keyword Args: method (str): ``'phase'`` encodes the input into the phase of single-mode squeezing, while ``'amplitude'`` uses the amplitude c (float): value of the phase of all squeezing gates if ``execution='amplitude'``, or the amplitude of all squeezing gates if ``execution='phase'`` Raises: ValueError: if ``features`` or ``wires`` is invalid """""" if not isinstance(wires, Iterable): raise ValueError(""Wires needs to be a list of wires that the embedding uses; got {}."".format(wires)) if len(wires) < len(features): raise ValueError(""Number of features to embed cannot be larger than number of wires, which is {}; "" ""got {}."".format(len(wires), len(features))) for idx, f in enumerate(features): if method == 'amplitude': Squeezing(f, c, wires=wires[idx]) elif method == 'phase': Squeezing(c, f, wires=wires[idx]) else: raise ValueError(""Execution method '{}' not known. Has to be 'phase' or 'amplitude'."".format(method)) " 22721,"def _prepare_environ(workspace): new_environ = os.environ.copy() new_environ['TMPDIR'] = workspace # So, pytest is nice, and a little to for our usage. 
# In order to help user to call seamlessly any piece of python code without requiring to # install it as a full-fledged setuptools distribution for instance, it injects the current # path into the PYTHONPATH environment variable. This allows the python interpreter to import # as modules any python file available in current working directory. # See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description. # However this behavior is not good in integration tests, in particular the nginx oldest ones. # Indeed during these kind of tests certbot is installed as a transitive dependency to # certbot-nginx. Here is the trick: this certbot version is not necessarily the same than # the certbot codebase lying in current working directory. For instance in oldest tests # certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0. # If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the # modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0). # This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if # PYTHONPATH is set, it does not contain the current working directory. if new_environ.get('PYTHONPATH'): # certbot_integration_tests.__file__ is: # '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc' # ... and we want '/path/to/certbot' certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__))) python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root] new_environ['PYTHONPATH'] = ':'.join(python_paths) return new_environ ","def _prepare_environ(workspace): new_environ = os.environ.copy() new_environ['TMPDIR'] = workspace # So, pytest is nice, and a little to for our usage. # In order to help user to call seamlessly any piece of python code without requiring to # install it as a full-fledged setuptools distribution for instance, it injects the current # path into the PYTHONPATH environment variable. This allows the python interpreter to import # as modules any python file available in current working directory. # See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description. # However this behavior is not good in integration tests, in particular the nginx oldest ones. # Indeed during these kind of tests certbot is installed as a transitive dependency to # certbot-nginx. Here is the trick: this certbot version is not necessarily the same than # the certbot codebase lying in current working directory. For instance in oldest tests # certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0. # If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the # modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0). # This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if # PYTHONPATH is set, it does not contain the path to the root of the codebase. if new_environ.get('PYTHONPATH'): # certbot_integration_tests.__file__ is: # '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc' # ... 
and we want '/path/to/certbot' certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__))) python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root] new_environ['PYTHONPATH'] = ':'.join(python_paths) return new_environ " 45710,"def forecast( R, V, timesteps, n_cascade_levels=6, R_thr=None, extrap_method=""semilagrangian"", decomp_method=""fft"", bandpass_filter_method=""gaussian"", ar_order=2, conditional=False, probmatching_method=""cdf"", num_workers=1, fft_method=""numpy"", domain=""spatial"", extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast by using the Spectral Prognosis (S-PROG) method. Parameters ---------- R: array-like Array of shape (ar_order+1,m,n) containing the input precipitation fields ordered by timestamp from oldest to newest. The time steps between the inputs are assumed to be regular. V: array-like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. n_cascade_levels: int, optional The number of cascade levels to use. R_thr: float The threshold value for minimum observable precipitation intensity. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. decomp_method: {'fft'}, optional Name of the cascade decomposition method to use. See the documentation of pysteps.cascade.interface. bandpass_filter_method: {'gaussian', 'uniform'}, optional Name of the bandpass filter method to use with the cascade decomposition. See the documentation of pysteps.cascade.interface. ar_order: int, optional The order of the autoregressive model to use. Must be >= 1. conditional: bool, optional If set to True, compute the statistics of the precipitation field conditionally by excluding pixels where the values are below the threshold R_thr. probmatching_method: {'cdf','mean',None}, optional Method for matching the conditional statistics of the forecast field (areas with precipitation intensity above the threshold R_thr) with those of the most recently observed one. 'cdf'=map the forecast CDF to the observed one, 'mean'=adjust only the mean value, None=no matching applied. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is enabled or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. domain: {""spatial"", ""spectral""} If ""spatial"", all computations are done in the spatial domain (the classical S-PROG model). If ""spectral"", the AR(2) models are applied directly in the spectral domain to reduce memory footprint and improve performance :cite:`PCH2019a`. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. 
filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool If set to True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input precipitation fields R. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). See also -------- pysteps.extrapolation.interface, pysteps.cascade.interface References ---------- :cite:`Seed2003`, :cite:`PCH2019a` """""" _check_inputs(R, V, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() if filter_kwargs is None: filter_kwargs = dict() if np.any(~np.isfinite(V)): raise ValueError(""V contains non-finite values"") print(""Computing S-PROG nowcast:"") print(""-------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (R.shape[1], R.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""bandpass filter: %s"" % bandpass_filter_method) print(""decomposition: %s"" % decomp_method) print(""conditional statistics: %s"" % (""yes"" if conditional else ""no"")) print(""probability matching: %s"" % probmatching_method) print(""FFT method: %s"" % fft_method) print(""domain: %s"" % domain) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the AR(p) model: %d"" % ar_order) print(""precip. intensity threshold: %g"" % R_thr) if measure_time: starttime_init = time.time() fft = utils.get_method(fft_method, shape=R.shape[1:], n_threads=num_workers) M, N = R.shape[1:] # initialize the band-pass filter filter_method = cascade.get_method(bandpass_filter_method) filter = filter_method((M, N), n_cascade_levels, **filter_kwargs) decomp_method, recomp_method = cascade.get_method(decomp_method) extrapolator_method = extrapolation.get_method(extrap_method) R = R[-(ar_order + 1) :, :, :].copy() R_min = np.nanmin(R) # determine the domain mask from non-finite values domain_mask = np.logical_or.reduce( [~np.isfinite(R[i, :]) for i in range(R.shape[0])] ) # determine the precipitation threshold mask if conditional: MASK_thr = np.logical_and.reduce( [R[i, :, :] >= R_thr for i in range(R.shape[0])] ) else: MASK_thr = None # initialize the extrapolator x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1])) xy_coords = np.stack([x_values, y_values]) extrap_kwargs = extrap_kwargs.copy() extrap_kwargs[""xy_coords""] = xy_coords extrap_kwargs[""allow_nonfinite_values""] = True # advect the previous precipitation fields to the same position with the # most recent one (i.e. 
transform them into the Lagrangian coordinates) res = list() def f(R, i): return extrapolator_method(R[i, :], V, ar_order - i, ""min"", **extrap_kwargs)[-1] for i in range(ar_order): if not DASK_IMPORTED: R[i, :, :] = f(R, i) else: res.append(dask.delayed(f)(R, i)) if DASK_IMPORTED: num_workers_ = len(res) if num_workers > len(res) else num_workers R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]]) # replace non-finite values with the minimum value R = R.copy() for i in range(R.shape[0]): R[i, ~np.isfinite(R[i, :])] = np.nanmin(R[i, :]) # compute the cascade decompositions of the input precipitation fields R_d = [] for i in range(ar_order + 1): R_ = decomp_method( R[i, :, :], filter, mask=MASK_thr, fft_method=fft, output_domain=domain, normalize=True, compute_stats=True, compact_output=True, ) R_d.append(R_) # rearrange the cascade levels into a four-dimensional array of shape # (n_cascade_levels,ar_order+1,m,n) for the autoregressive model R_c = nowcast_utils.stack_cascades( R_d, n_cascade_levels, convert_to_full_arrays=True ) # compute lag-l temporal autocorrelation coefficients for each cascade level GAMMA = np.empty((n_cascade_levels, ar_order)) for i in range(n_cascade_levels): if domain == ""spatial"": GAMMA[i, :] = correlation.temporal_autocorrelation(R_c[i], mask=MASK_thr) else: GAMMA[i, :] = correlation.temporal_autocorrelation( R_c[i], domain=""spectral"", x_shape=R.shape[1:] ) R_c = nowcast_utils.stack_cascades( R_d, n_cascade_levels, convert_to_full_arrays=False ) R_d = R_d[-1] nowcast_utils.print_corrcoefs(GAMMA) if ar_order == 2: # adjust the lag-2 correlation coefficient to ensure that the AR(p) # process is stationary for i in range(n_cascade_levels): GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(GAMMA[i, 0], GAMMA[i, 1]) # estimate the parameters of the AR(p) model from the autocorrelation # coefficients PHI = np.empty((n_cascade_levels, ar_order + 1)) for i in range(n_cascade_levels): PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :]) nowcast_utils.print_ar_params(PHI) # discard all except the p-1 last cascades because they are not needed for # the AR(p) model R_c = [R_c[i][-ar_order:] for i in range(n_cascade_levels)] if probmatching_method == ""mean"": mu_0 = np.mean(R[-1, :, :][R[-1, :, :] >= R_thr]) # compute precipitation mask and wet area ratio MASK_p = R[-1, :, :] >= R_thr war = 1.0 * np.sum(MASK_p) / (R.shape[1] * R.shape[2]) if measure_time: init_time = time.time() - starttime_init R = R[-1, :, :] print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() R_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" R_f_prev = R extrap_kwargs[""return_displacement""] = True D = None t_nowcast = 0 t_prev = 0.0 # iterate each time step for t in range(len(timesteps)): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... 
"" % (t_nowcast + 1), end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() for i in range(n_cascade_levels): R_c[i] = autoregression.iterate_ar_model(R_c[i], PHI[i, :]) R_d[""cascade_levels""] = [R_c[i][-1, :] for i in range(n_cascade_levels)] if domain == ""spatial"": R_d[""cascade_levels""] = np.stack(R_d[""cascade_levels""]) R_f_new = recomp_method(R_d) if domain == ""spectral"": R_f_new = fft.irfft2(R_f_new) MASK = _compute_sprog_mask(R_f_new, war) R_f_new[~MASK] = R_min if probmatching_method == ""cdf"": # adjust the CDF of the forecast to match the most recently # observed precipitation field R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R) elif probmatching_method == ""mean"": mu_fct = np.mean(R_f_new[MASK]) R_f_new[MASK] = R_f_new[MASK] - mu_fct + mu_0 R_f_new[domain_mask] = np.nan # advect the recomposed precipitation field to obtain the forecast for # the current time step (or subtimesteps if non-integer time steps are # given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: R_f_ip = ( 1.0 - t_diff_prev_int ) * R_f_prev + t_diff_prev_int * R_f_new else: R_f_ip = R_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = D R_f_ep, D = extrapolator_method( R_f_ip, V, [t_diff_prev], **extrap_kwargs, ) R_f.append(R_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if len(subtimesteps) == 0: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = D _, D = extrapolator_method( None, V, [t_diff_prev], **extrap_kwargs, ) t_prev = t + 1 R_f_prev = R_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop R_f = np.stack(R_f) if measure_time: return R_f, init_time, mainloop_time else: return R_f ","def forecast( R, V, timesteps, n_cascade_levels=6, R_thr=None, extrap_method=""semilagrangian"", decomp_method=""fft"", bandpass_filter_method=""gaussian"", ar_order=2, conditional=False, probmatching_method=""cdf"", num_workers=1, fft_method=""numpy"", domain=""spatial"", extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast by using the Spectral Prognosis (S-PROG) method. Parameters ---------- R: array-like Array of shape (ar_order+1,m,n) containing the input precipitation fields ordered by timestamp from oldest to newest. The time steps between the inputs are assumed to be regular. V: array-like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. n_cascade_levels: int, optional The number of cascade levels to use. R_thr: float The threshold value for minimum observable precipitation intensity. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. decomp_method: {'fft'}, optional Name of the cascade decomposition method to use. See the documentation of pysteps.cascade.interface. bandpass_filter_method: {'gaussian', 'uniform'}, optional Name of the bandpass filter method to use with the cascade decomposition. 
See the documentation of pysteps.cascade.interface. ar_order: int, optional The order of the autoregressive model to use. Must be >= 1. conditional: bool, optional If set to True, compute the statistics of the precipitation field conditionally by excluding pixels where the values are below the threshold R_thr. probmatching_method: {'cdf','mean',None}, optional Method for matching the conditional statistics of the forecast field (areas with precipitation intensity above the threshold R_thr) with those of the most recently observed one. 'cdf'=map the forecast CDF to the observed one, 'mean'=adjust only the mean value, None=no matching applied. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is enabled or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. domain: {""spatial"", ""spectral""} If ""spatial"", all computations are done in the spatial domain (the classical S-PROG model). If ""spectral"", the AR(2) models are applied directly in the spectral domain to reduce memory footprint and improve performance :cite:`PCH2019a`. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool If set to True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input precipitation fields R. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). 
See also -------- pysteps.extrapolation.interface, pysteps.cascade.interface References ---------- :cite:`Seed2003`, :cite:`PCH2019a` """""" _check_inputs(R, V, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() if filter_kwargs is None: filter_kwargs = dict() if np.any(~np.isfinite(V)): raise ValueError(""V contains non-finite values"") print(""Computing S-PROG nowcast:"") print(""-------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (R.shape[1], R.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""bandpass filter: %s"" % bandpass_filter_method) print(""decomposition: %s"" % decomp_method) print(""conditional statistics: %s"" % (""yes"" if conditional else ""no"")) print(""probability matching: %s"" % probmatching_method) print(""FFT method: %s"" % fft_method) print(""domain: %s"" % domain) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the AR(p) model: %d"" % ar_order) print(""precip. intensity threshold: %g"" % R_thr) if measure_time: starttime_init = time.time() fft = utils.get_method(fft_method, shape=R.shape[1:], n_threads=num_workers) M, N = R.shape[1:] # initialize the band-pass filter filter_method = cascade.get_method(bandpass_filter_method) filter = filter_method((M, N), n_cascade_levels, **filter_kwargs) decomp_method, recomp_method = cascade.get_method(decomp_method) extrapolator_method = extrapolation.get_method(extrap_method) R = R[-(ar_order + 1) :, :, :].copy() R_min = np.nanmin(R) # determine the domain mask from non-finite values domain_mask = np.logical_or.reduce( [~np.isfinite(R[i, :]) for i in range(R.shape[0])] ) # determine the precipitation threshold mask if conditional: MASK_thr = np.logical_and.reduce( [R[i, :, :] >= R_thr for i in range(R.shape[0])] ) else: MASK_thr = None # initialize the extrapolator x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1])) xy_coords = np.stack([x_values, y_values]) extrap_kwargs = extrap_kwargs.copy() extrap_kwargs[""xy_coords""] = xy_coords extrap_kwargs[""allow_nonfinite_values""] = True # advect the previous precipitation fields to the same position with the # most recent one (i.e. 
transform them into the Lagrangian coordinates) res = list() def f(R, i): return extrapolator_method(R[i, :], V, ar_order - i, ""min"", **extrap_kwargs)[-1] for i in range(ar_order): if not DASK_IMPORTED: R[i, :, :] = f(R, i) else: res.append(dask.delayed(f)(R, i)) if DASK_IMPORTED: num_workers_ = len(res) if num_workers > len(res) else num_workers R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]]) # replace non-finite values with the minimum value R = R.copy() for i in range(R.shape[0]): R[i, ~np.isfinite(R[i, :])] = np.nanmin(R[i, :]) # compute the cascade decompositions of the input precipitation fields R_d = [] for i in range(ar_order + 1): R_ = decomp_method( R[i, :, :], filter, mask=MASK_thr, fft_method=fft, output_domain=domain, normalize=True, compute_stats=True, compact_output=True, ) R_d.append(R_) # rearrange the cascade levels into a four-dimensional array of shape # (n_cascade_levels,ar_order+1,m,n) for the autoregressive model R_c = nowcast_utils.stack_cascades( R_d, n_cascade_levels, convert_to_full_arrays=True ) # compute lag-l temporal autocorrelation coefficients for each cascade level GAMMA = np.empty((n_cascade_levels, ar_order)) for i in range(n_cascade_levels): if domain == ""spatial"": GAMMA[i, :] = correlation.temporal_autocorrelation(R_c[i], mask=MASK_thr) else: GAMMA[i, :] = correlation.temporal_autocorrelation( R_c[i], domain=""spectral"", x_shape=R.shape[1:] ) R_c = nowcast_utils.stack_cascades( R_d, n_cascade_levels, convert_to_full_arrays=False ) R_d = R_d[-1] nowcast_utils.print_corrcoefs(GAMMA) if ar_order == 2: # adjust the lag-2 correlation coefficient to ensure that the AR(p) # process is stationary for i in range(n_cascade_levels): GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(GAMMA[i, 0], GAMMA[i, 1]) # estimate the parameters of the AR(p) model from the autocorrelation # coefficients PHI = np.empty((n_cascade_levels, ar_order + 1)) for i in range(n_cascade_levels): PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :]) nowcast_utils.print_ar_params(PHI) # discard all except the p-1 last cascades because they are not needed for # the AR(p) model R_c = [R_c[i][-ar_order:] for i in range(n_cascade_levels)] if probmatching_method == ""mean"": mu_0 = np.mean(R[-1, :, :][R[-1, :, :] >= R_thr]) # compute precipitation mask and wet area ratio MASK_p = R[-1, :, :] >= R_thr war = 1.0 * np.sum(MASK_p) / (R.shape[1] * R.shape[2]) if measure_time: init_time = time.time() - starttime_init R = R[-1, :, :] print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() R_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" R_f_prev = R extrap_kwargs[""return_displacement""] = True D = None t_nowcast = 0 t_prev = 0.0 # iterate each time step for t in range(len(timesteps)): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timestep] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... 
"" % (t_nowcast + 1), end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() for i in range(n_cascade_levels): R_c[i] = autoregression.iterate_ar_model(R_c[i], PHI[i, :]) R_d[""cascade_levels""] = [R_c[i][-1, :] for i in range(n_cascade_levels)] if domain == ""spatial"": R_d[""cascade_levels""] = np.stack(R_d[""cascade_levels""]) R_f_new = recomp_method(R_d) if domain == ""spectral"": R_f_new = fft.irfft2(R_f_new) MASK = _compute_sprog_mask(R_f_new, war) R_f_new[~MASK] = R_min if probmatching_method == ""cdf"": # adjust the CDF of the forecast to match the most recently # observed precipitation field R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R) elif probmatching_method == ""mean"": mu_fct = np.mean(R_f_new[MASK]) R_f_new[MASK] = R_f_new[MASK] - mu_fct + mu_0 R_f_new[domain_mask] = np.nan # advect the recomposed precipitation field to obtain the forecast for # the current time step (or subtimesteps if non-integer time steps are # given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: R_f_ip = ( 1.0 - t_diff_prev_int ) * R_f_prev + t_diff_prev_int * R_f_new else: R_f_ip = R_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = D R_f_ep, D = extrapolator_method( R_f_ip, V, [t_diff_prev], **extrap_kwargs, ) R_f.append(R_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if len(subtimesteps) == 0: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = D _, D = extrapolator_method( None, V, [t_diff_prev], **extrap_kwargs, ) t_prev = t + 1 R_f_prev = R_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop R_f = np.stack(R_f) if measure_time: return R_f, init_time, mainloop_time else: return R_f " 31863,"def redlock_get_scan_status(): """""" Get DevOps Scan Status """""" scan_id = demisto.args().get('scan_id', None) response = req('GET', f'iac/v2/scans/{scan_id}/status', param_data={}, data={}) if ( not response or 'data' not in response ): demisto.results('No results found') else: result = response['data'] readable_output = { ""ID"": result.get('id'), ""Status"": result.get('attributes')['status'] } md = tableToMarkdown(""Scan Status:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': result}, 'HumanReadable': md }) ","def redlock_get_scan_status(): """""" Get DevOps Scan Status """""" scan_id = demisto.args().get('scan_id', None) response = req('GET', f'iac/v2/scans/{scan_id}/status', param_data={}, data={}) if ( not response or 'data' not in response ): demisto.results('No results found') else: result = response.get('data', {}) readable_output = { ""ID"": result.get('id'), ""Status"": result.get('attributes')['status'] } md = tableToMarkdown(""Scan Status:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': result}, 'HumanReadable': md }) " 34909,"def sequence_mask(data, valid_length=None, mask_value=0, axis=0): """"""Sets all elements outside the expected length of the sequence to a constant value. This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] 
and returns an array of the same shape. `axis` means the axis of the length dimension and can only be 0 or 1. If `axis` is 0, the data must have shape [MAX_LENGTH, batch_size, ...]. Otherwise (axis=1), the data must have shape [batch_size, MAX_LENGTH, ...]. `valid_length` gives the length of each sequence. `valid_length` should be a 1D int array with positive ints and has dimension [batch_size,]. Parameters ---------- data : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. valid_length : tvm.Tensor or None 1-D with shape [batch_size,] mask_value : float, default 0 The masking value, default axis : int, default 0 axis of the length dimension, must be 0 or 1. Returns ------- output : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. """""" assert len(data.shape) >= 2,\ ""only support data.ndim >= 2, received data.shape = {}"".format(data.shape) assert axis == 0 or axis == 1, ""only support axis = 0, 1, received axis = {}"".format(axis) return cpp.sequence_mask(data, valid_length, mask_value, axis) ","def sequence_mask(data, valid_length=None, mask_value=0, axis=0): """"""Sets all elements outside the expected length of the sequence to a constant value. This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] and returns an array of the same shape. `axis` means the axis of the length dimension and can only be 0 or 1. If `axis` is 0, the data must have shape [MAX_LENGTH, batch_size, ...]. Otherwise (axis=1), the data must have shape [batch_size, MAX_LENGTH, ...]. `valid_length` gives the length of each sequence. `valid_length` should be a 1D int array with positive ints and has dimension [batch_size,]. Parameters ---------- data : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. valid_length : tvm.Tensor or None 1-D with shape [batch_size,] mask_value : float, default 0 The masking value, default axis : int, optional axis of the length dimension, must be 0 or 1. Returns ------- output : tvm.Tensor N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] depending on the value of `axis`. 
"""""" assert len(data.shape) >= 2,\ ""only support data.ndim >= 2, received data.shape = {}"".format(data.shape) assert axis == 0 or axis == 1, ""only support axis = 0, 1, received axis = {}"".format(axis) return cpp.sequence_mask(data, valid_length, mask_value, axis) " 28219,"def generate_log_file_name(): """""" Generates the name of the log file based on process id, date, time and PYTHON_LOG_NAME """""" pid = str(os.getpid()) dt_str = datetime.now().strftime(""%y%m%d-"") python_log_name = dt_str + pid + PYTHON_LOG_NAME return python_log_name ","def generate_log_file_name(): """""" Generates the name of the log file based on process id, date, time and PYTHON_LOG_NAME """""" pid = str(os.getpid()) dt_str = datetime.now().strftime(""%y%m%d-"") python_log_name = '-'.join([dt_str, pid, PYTHON_LOG_NAME]) return python_log_name " 31682,"def get_tasks_command(incident_id): response = get_tasks(incident_id) if response: tasks = [] for task in response: task_object = {} incident_name = task['inc_name'] task_object['ID'] = task['id'] task_object['Name'] = task['name'] if task['due_date']: task_object['DueDate'] = normalize_timestamp(task['due_date']) task_object['Status'] = 'Open' if task['status'] == 'O' else 'Closed' task_object['Required'] = task['required'] if task['form']: task_object['Form'] = task['form'] if task['user_notes']: task_object['UserNotes'] = task['user_notes'] task_object['Creator'] = task['creator_principal']['display_name'] task_object['Category'] = task['cat_name'] if task['instr_text']: task_object['Instructions'] = task['instr_text'] tasks.append(task_object) ec = { 'Resilient.Incidents(val.Id && val.Id === obj.Id)': { 'Id': incident_id, 'Name': incident_name, 'Tasks': tasks } } title = 'Incident ' + incident_id + ' tasks' entry = { 'Type': entryTypes['note'], 'Contents': response, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown(title, tasks, ['ID', 'Name', 'Category', 'Form', 'Status', 'DueDate', 'Instructions', 'UserNotes', 'Required', 'Creator']), 'EntryContext': ec } return entry else: return 'No tasks found for this incident.' ","def get_tasks_command(incident_id): response = get_tasks(incident_id) if response: tasks = [] for task in response: task_object = {} incident_name = task['inc_name'] task_object['ID'] = task['id'] task_object['Name'] = task['name'] if task['due_date']: task_object['DueDate'] = normalize_timestamp(task['due_date']) task_object['Status'] = 'Open' if task['status'] == 'O' else 'Closed' task_object['Required'] = task['required'] if task['form']: task_object['Form'] = task['form'] if task['user_notes']: task_object['UserNotes'] = task['user_notes'] task_object['Creator'] = task.get('creator_principal', {}).get('display_name') task_object['Category'] = task['cat_name'] if task['instr_text']: task_object['Instructions'] = task['instr_text'] tasks.append(task_object) ec = { 'Resilient.Incidents(val.Id && val.Id === obj.Id)': { 'Id': incident_id, 'Name': incident_name, 'Tasks': tasks } } title = 'Incident ' + incident_id + ' tasks' entry = { 'Type': entryTypes['note'], 'Contents': response, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown(title, tasks, ['ID', 'Name', 'Category', 'Form', 'Status', 'DueDate', 'Instructions', 'UserNotes', 'Required', 'Creator']), 'EntryContext': ec } return entry else: return 'No tasks found for this incident.' 
" 32000,"def get_indicator_list(client: MandiantClient, limit: int, first_fetch: str, indicator_type: str) -> List[Dict]: """""" Get list of indicators from given type. Args: client (MandiantClient): client limit (int): number of indicators to return. first_fetch (str): Get indicators newer than first_fetch. indicator_type (str): indicator type Returns: List[Dict]: list of indicators """""" last_run_dict = demisto.getLastRun() indicators_list = last_run_dict.get(indicator_type + 'List', []) if len(indicators_list) < limit: last_run = last_run_dict.get(indicator_type + 'Last', first_fetch) new_indicators_list = get_new_indicators(client, last_run, indicator_type, limit) indicators_list += new_indicators_list if indicators_list: new_indicators_list = indicators_list[:limit] last_run_dict[indicator_type + 'List'] = indicators_list[limit:] date_key = 'last_seen' if indicator_type == 'Indicators' else 'last_updated' last_run_dict[indicator_type + 'Last'] = new_indicators_list[-1][date_key] demisto.setLastRun(last_run_dict) indicators_list = new_indicators_list return indicators_list ","def get_indicator_list(client: MandiantClient, limit: int, first_fetch: str, indicator_type: str) -> List[Dict]: """""" Get list of indicators from given type. Args: client (MandiantClient): client limit (int): number of indicators to return. first_fetch (str): Get indicators newer than first_fetch. indicator_type (str): indicator type Returns: List[Dict]: list of indicators """""" last_run_dict = demisto.getLastRun() indicators_list = last_run_dict.get(indicator_type + 'List', []) if len(indicators_list) < limit: last_run = last_run_dict.get(f'{indicator_type}LastFetch', first_fetch) new_indicators_list = get_new_indicators(client, last_run, indicator_type, limit) indicators_list += new_indicators_list if indicators_list: new_indicators_list = indicators_list[:limit] last_run_dict[indicator_type + 'List'] = indicators_list[limit:] date_key = 'last_seen' if indicator_type == 'Indicators' else 'last_updated' last_run_dict[indicator_type + 'Last'] = new_indicators_list[-1][date_key] demisto.setLastRun(last_run_dict) indicators_list = new_indicators_list return indicators_list " 30998,"def fetch_incidents(client, last_run, fetch_time, mapper_in): """""" This function will execute each interval (default is 1 minute). 
Args: client: Workday client last_run: The greatest incident created_time we fetched from last fetch fetch_time: The time interval when the function should execute and return events/incidents Returns: last_run: This will be last_run in the next fetch-incidents events: Incidents/Events that will be created in Cortex XSOAR """""" start = datetime.now() events = [] from_date_time = '###' to_date_time = '$$$' try: # If there is no fetch time configured, it will be set to 0 and no events will be pulled fetch_time = int(fetch_time) if fetch_time else 0 time_elapsed_in_minutes, last_run_time = get_time_elapsed(fetch_time, last_run) from_date_time = last_run_time if fetch_time != 0 and time_elapsed_in_minutes >= fetch_time: to_date_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT) report_data = client.get_full_report() report_entries = report_data.get('Report_Entry') for entry in report_entries: workday_user = demisto.mapObject(entry, mapper_in, INCIDENT_TYPE) workday_user = convert_incident_fields_to_cli_names(workday_user) demisto_user = get_demisto_user(workday_user) profile_changed_fields = get_profile_changed_fields(workday_user, demisto_user) terminate_date_arrived = check_if_user_should_be_terminated(workday_user) does_email_exist = does_email_exist_in_xsoar(workday_user.get('email')) if ((demisto_user and len(profile_changed_fields) == 0) or (not demisto_user and does_email_exist))\ and not terminate_date_arrived: # either no change in user profile or user profile doesn't exist but the email is already used # in both cases, don't create the incident continue entry['UserProfile'] = workday_user event = { ""rawJSON"": json.dumps(entry), ""details"": 'Profile changed. Changed fields: ' + str(profile_changed_fields) } events.append(event) last_run_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT) demisto.info(f'Workday Fetch Events Completed. Response Time:' f' {(datetime.now() - start).total_seconds()} seconds') last_run = {'time': last_run_time, ""sync_users"": True} except Exception as e: demisto.error(f'Failed to fetch events. From Date = {from_date_time}. To Date = {to_date_time}') raise e return last_run, events ","def fetch_incidents(client, last_run, fetch_time, mapper_in, report_url): """""" This function will execute each interval (default is 1 minute). 
Args: client: Workday client last_run: The greatest incident created_time we fetched from last fetch fetch_time: The time interval when the function should execute and return events/incidents Returns: last_run: This will be last_run in the next fetch-incidents events: Incidents/Events that will be created in Cortex XSOAR """""" start = datetime.now() events = [] from_date_time = '###' to_date_time = '$$$' try: # If there is no fetch time configured, it will be set to 0 and no events will be pulled fetch_time = int(fetch_time) if fetch_time else 0 time_elapsed_in_minutes, last_run_time = get_time_elapsed(fetch_time, last_run) from_date_time = last_run_time if fetch_time != 0 and time_elapsed_in_minutes >= fetch_time: to_date_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT) report_data = client.get_full_report() report_entries = report_data.get('Report_Entry') for entry in report_entries: workday_user = demisto.mapObject(entry, mapper_in, INCIDENT_TYPE) workday_user = convert_incident_fields_to_cli_names(workday_user) demisto_user = get_demisto_user(workday_user) profile_changed_fields = get_profile_changed_fields(workday_user, demisto_user) terminate_date_arrived = check_if_user_should_be_terminated(workday_user) does_email_exist = does_email_exist_in_xsoar(workday_user.get('email')) if ((demisto_user and len(profile_changed_fields) == 0) or (not demisto_user and does_email_exist))\ and not terminate_date_arrived: # either no change in user profile or user profile doesn't exist but the email is already used # in both cases, don't create the incident continue entry['UserProfile'] = workday_user event = { ""rawJSON"": json.dumps(entry), ""details"": 'Profile changed. Changed fields: ' + str(profile_changed_fields) } events.append(event) last_run_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT) demisto.info(f'Workday Fetch Events Completed. Response Time:' f' {(datetime.now() - start).total_seconds()} seconds') last_run = {'time': last_run_time, ""sync_users"": True} except Exception as e: demisto.error(f'Failed to fetch events. From Date = {from_date_time}. To Date = {to_date_time}') raise e return last_run, events " 31431,"def check_reviewers(reviewers, pr_author, version, modified_files, pack, pr_number, github_token, verify_ssl): if reviewers: if pr_author != 'xsoar-bot' or version != '1.0.0': pack_files = {file for file in modified_files if file.startswith(PACKS_FOLDER) and Path(file).parts[1] == pack} tag_user_on_pr( reviewers=reviewers, pr_number=pr_number, pack=pack, pack_files=pack_files, github_token=github_token, verify_ssl=verify_ssl ) else: print(f""{pack} pack No reviewers were found."") ","def check_reviewers(reviewers, pr_author, version, modified_files, pack, pr_number, github_token, verify_ssl): if reviewers: if pr_author != 'xsoar-bot' or version != '1.0.0': pack_files = {file for file in modified_files if file.startswith(PACKS_FOLDER) and Path(file).parts[1] == pack} tag_user_on_pr( reviewers=reviewers, pr_number=pr_number, pack=pack, pack_files=pack_files, github_token=github_token, verify_ssl=verify_ssl ) else: print(f'{pack} pack No reviewers were found.') " 36978,"def split_files( files: Dict[str, Any], max_bytes: int = 10 * 1024 * 1024 ) -> Iterable[Dict[str, Dict]]: """""" Splits a files dict (see `files` arg) into smaller dicts of at most `MAX_BYTES` size. This method is used in `FileStreamAPI._send()` to limit the size of post requests sent to wandb server. 
Arguments: files (dict): `dict` of form {file_name: {'content': ""....."", 'offset': 0}} The key `file_name` can also be mapped to a List [{""offset"": int, ""content"": str}] `max_bytes`: max size for chunk in bytes """""" current_volume: Dict[str, Dict] = {} current_size = 0 def _str_size(x): return len(x) if isinstance(x, bytes) else len(x.encode(""utf-8"")) def _file_size(file): size = file.get(""_size"") if size is None: size = sum(map(_str_size, file[""content""])) file[""_size""] = size return size def _split_file(file, num_lines): offset = file[""offset""] content = file[""content""] name = file[""name""] f1 = {""offset"": offset, ""content"": content[:num_lines], ""name"": name} f2 = { ""offset"": offset + num_lines, ""content"": content[num_lines:], ""name"": name, } return f1, f2 def _num_lines_from_num_bytes(file, num_bytes): size = 0 num_lines = 0 content = file[""content""] while num_lines < len(content): size += _str_size(content[num_lines]) if size > num_bytes: break num_lines += 1 return num_lines files_stack = [] for k, v in files.items(): if type(v) is list: for item in v: files_stack.append( {""name"": k, ""offset"": item[""offset""], ""content"": item[""content""]} ) else: files_stack.append( {""name"": k, ""offset"": v[""offset""], ""content"": v[""content""]} ) while files_stack: f = files_stack.pop() if f[""name""] in current_volume: files_stack.append(f) yield current_volume current_volume = {} current_size = 0 continue # For each file, we have to do 1 of 4 things: # - Add the file as such to the current volume if possible. # - Split the file and add the first part to the current volume and push the second part back onto the stack. # - If that's not possible, check if current volume is empty: # - If empty, add first line of file to current volume and push rest onto stack (This volume will exceed MAX_MB). # - If not, push file back to stack and yield current volume. fsize = _file_size(f) rem = max_bytes - current_size if fsize <= rem: current_volume[f[""name""]] = { ""offset"": f[""offset""], ""content"": f[""content""], } current_size += fsize else: num_lines = _num_lines_from_num_bytes(f, rem) if not num_lines and not current_volume: num_lines = 1 if num_lines: f1, f2 = _split_file(f, num_lines) current_volume[f1[""name""]] = { ""offset"": f1[""offset""], ""content"": f1[""content""], } files_stack.append(f2) yield current_volume current_volume = {} current_size = 0 continue else: files_stack.append(f) yield current_volume current_volume = {} current_size = 0 continue if current_size >= max_bytes: yield current_volume current_volume = {} current_size = 0 continue if current_volume: yield current_volume ","def split_files( files: Dict[str, Any], max_bytes: int = 10 * 1024 * 1024 ) -> Iterable[Dict[str, Dict]]: """""" Splits a files dict (see `files` arg) into smaller dicts of at most `MAX_BYTES` size. This method is used in `FileStreamAPI._send()` to limit the size of post requests sent to wandb server. 
Arguments: files (dict): `dict` of form {file_name: {'content': ""....."", 'offset': 0}} The key `file_name` can also be mapped to a List [{""offset"": int, ""content"": str}] `max_bytes`: max size for chunk in bytes """""" current_volume: Dict[str, Dict] = {} current_size = 0 def _str_size(x): return len(x) if isinstance(x, bytes) else len(x.encode(""utf-8"")) def _file_size(file): size = file.get(""_size"") if size is None: size = sum(map(_str_size, file[""content""])) file[""_size""] = size return size def _split_file(file, num_lines): offset = file[""offset""] content = file[""content""] name = file[""name""] f1 = {""offset"": offset, ""content"": content[:num_lines], ""name"": name} f2 = { ""offset"": offset + num_lines, ""content"": content[num_lines:], ""name"": name, } return f1, f2 def _num_lines_from_num_bytes(file, num_bytes): size = 0 num_lines = 0 content = file[""content""] while num_lines < len(content): size += _str_size(content[num_lines]) if size > num_bytes: break num_lines += 1 return num_lines files_stack = [] for k, v in files.items(): if isinstance(v, list): for item in v: files_stack.append( {""name"": k, ""offset"": item[""offset""], ""content"": item[""content""]} ) else: files_stack.append( {""name"": k, ""offset"": v[""offset""], ""content"": v[""content""]} ) while files_stack: f = files_stack.pop() if f[""name""] in current_volume: files_stack.append(f) yield current_volume current_volume = {} current_size = 0 continue # For each file, we have to do 1 of 4 things: # - Add the file as such to the current volume if possible. # - Split the file and add the first part to the current volume and push the second part back onto the stack. # - If that's not possible, check if current volume is empty: # - If empty, add first line of file to current volume and push rest onto stack (This volume will exceed MAX_MB). # - If not, push file back to stack and yield current volume. 
fsize = _file_size(f) rem = max_bytes - current_size if fsize <= rem: current_volume[f[""name""]] = { ""offset"": f[""offset""], ""content"": f[""content""], } current_size += fsize else: num_lines = _num_lines_from_num_bytes(f, rem) if not num_lines and not current_volume: num_lines = 1 if num_lines: f1, f2 = _split_file(f, num_lines) current_volume[f1[""name""]] = { ""offset"": f1[""offset""], ""content"": f1[""content""], } files_stack.append(f2) yield current_volume current_volume = {} current_size = 0 continue else: files_stack.append(f) yield current_volume current_volume = {} current_size = 0 continue if current_size >= max_bytes: yield current_volume current_volume = {} current_size = 0 continue if current_volume: yield current_volume " 3153,"def test_set_column_with_array(): # Case: setting an array as a new column (df[col] = arr) copies that data df = DataFrame({""a"": [1, 2, 3], ""b"": [4, 5, 6]}) arr = np.array([1, 2, 3]) df[""c""] = arr # the array data is copied assert not np.shares_memory(df[""c""].values, arr) # and thus modifying the array does not modify the DataFrame arr[0] = 0 tm.assert_series_equal(df[""c""], Series([1, 2, 3], name=""c"")) ","def test_set_column_with_array(): # Case: setting an array as a new column (df[col] = arr) copies that data df = DataFrame({""a"": [1, 2, 3], ""b"": [4, 5, 6]}) arr = np.array([1, 2, 3], dtype=""int64"") df[""c""] = arr # the array data is copied assert not np.shares_memory(df[""c""].values, arr) # and thus modifying the array does not modify the DataFrame arr[0] = 0 tm.assert_series_equal(df[""c""], Series([1, 2, 3], name=""c"")) " 34313,"def try_plot_hostogram( output_directory: Optional[Text], intent_hist_filename: Optional[Text], intent_results: List[IntentEvaluationResult], ): if output_directory: intent_hist_filename = os.path.join(output_directory, intent_hist_filename) plot_attribute_confidences( intent_results, intent_hist_filename, ""intent_target"", ""intent_prediction"" ) ","def try_plot_histogram( output_directory: Optional[Text], intent_hist_filename: Optional[Text], intent_results: List[IntentEvaluationResult], ): if output_directory: intent_hist_filename = os.path.join(output_directory, intent_hist_filename) plot_attribute_confidences( intent_results, intent_hist_filename, ""intent_target"", ""intent_prediction"" ) " 38313,"def multiplot_yt(ncol, nrow, plots, fields=None, **kwargs): r""""""Wrapper for multiplot that takes a yt PlotWindow Accepts all parameters used in multiplot. Parameters ---------- ncol : integer Number of columns in the figure. nrow : integer Number of rows in the figure. plots : ``PlotWindow`` instance, ``PhasePlot`` instance, or list of plots yt plots to be used. Examples -------- >>> p1 = SlicePlot(ds, 0, 'density') >>> p1.set_width(10, 'kpc') >>> >>> p2 = SlicePlot(ds, 0, 'temperature') >>> p2.set_width(10, 'kpc') >>> p2.set_cmap('temperature', 'hot') >>> >>> sph = ds.sphere(ds.domain_center, (10, 'kpc')) >>> p3 = PhasePlot(sph, 'radius', 'density', 'temperature', ... weight_field='cell_mass') >>> >>> p4 = PhasePlot(sph, 'radius', 'density', 'pressure', 'cell_mass') >>> >>> mp = multiplot_yt(2, 2, [p1, p2, p3, p4], savefig=""yt"", shrink_cb=0.9, ... 
bare_axes=True, yt_nocbar=False, margins=(0.5,0.5)) """""" # Determine whether the plots are organized in a PlotWindow, or list # of PlotWindows if isinstance(plots, (PlotWindow, PhasePlot)): if fields is None: fields = plots.fields if len(fields) < nrow*ncol: raise RuntimeError(""Number of plots(%d) is less ""\ ""than nrow(%d) x ncol(%d)."" % \ (len(fields), nrow, ncol)) return figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs) elif isinstance(plots, list) and isinstance(plots[0], (PlotWindow, PhasePlot)): if len(plots) < nrow*ncol: raise RuntimeError(""Number of plots(%d) is less ""\ ""than nrow(%d) x ncol(%d)."" % \ (len(fields), nrow, ncol)) return figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs) else: raise RuntimeError(""Unknown plot type in multiplot_yt"") return return figure ","def multiplot_yt(ncol, nrow, plots, fields=None, **kwargs): r""""""Wrapper for multiplot that takes a yt PlotWindow Accepts all parameters used in multiplot. Parameters ---------- ncol : integer Number of columns in the figure. nrow : integer Number of rows in the figure. plots : ``PlotWindow`` instance, ``PhasePlot`` instance, or list of plots yt plots to be used. Examples -------- >>> p1 = SlicePlot(ds, 0, 'density') >>> p1.set_width(10, 'kpc') >>> >>> p2 = SlicePlot(ds, 0, 'temperature') >>> p2.set_width(10, 'kpc') >>> p2.set_cmap('temperature', 'hot') >>> >>> sph = ds.sphere(ds.domain_center, (10, 'kpc')) >>> p3 = PhasePlot(sph, 'radius', 'density', 'temperature', ... weight_field='cell_mass') >>> >>> p4 = PhasePlot(sph, 'radius', 'density', 'pressure', 'cell_mass') >>> >>> mp = multiplot_yt(2, 2, [p1, p2, p3, p4], savefig=""yt"", shrink_cb=0.9, ... bare_axes=True, yt_nocbar=False, margins=(0.5,0.5)) """""" # Determine whether the plots are organized in a PlotWindow, or list # of PlotWindows if isinstance(plots, (PlotWindow, PhasePlot)): if fields is None: fields = plots.fields if len(fields) < nrow*ncol: raise RuntimeError(""Number of plots(%d) is less ""\ ""than nrow(%d) x ncol(%d)."" % \ (len(fields), nrow, ncol)) return figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs) elif isinstance(plots, list) and isinstance(plots[0], (PlotWindow, PhasePlot)): if len(plots) < nrow*ncol: raise RuntimeError(""Number of plots (%d) is less ""\ ""than nrow(%d) x ncol(%d)."" % \ (len(fields), nrow, ncol)) return figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs) else: raise RuntimeError(""Unknown plot type in multiplot_yt"") return return figure " 26239,"def load_test_parameters(duthost): """"""Load test parameters from the json file. 
Called only once when the module is initialized Args: duthost: The DUT host object """""" global DEFAULT_CABLE_LENGTH_LIST global TESTPARAM_HEADROOM_OVERRIDE global TESTPARAM_LOSSLESS_PG global TESTPARAM_SHARED_HEADROOM_POOL global TESTPARAM_EXTRA_OVERHEAD global TESTPARAM_ADMIN_DOWN global ASIC_TYPE global MAX_SPEED_8LANE_PORT param_file_name = ""qos/files/dynamic_buffer_param.json"" with open(param_file_name) as file: params = json.load(file) logging.info(""Loaded test parameters {} from {}"".format(params, param_file_name)) ASIC_TYPE = duthost.facts['asic_type'] vendor_specific_param = params[ASIC_TYPE] if 'default_cable_length' in vendor_specific_param: DEFAULT_CABLE_LENGTH_LIST = vendor_specific_param['default_cable_length'] if 'headroom-override' in vendor_specific_param: TESTPARAM_HEADROOM_OVERRIDE = vendor_specific_param['headroom-override'] if 'lossless_pg' in vendor_specific_param: TESTPARAM_LOSSLESS_PG = vendor_specific_param['lossless_pg'] if 'shared-headroom-pool' in vendor_specific_param: TESTPARAM_SHARED_HEADROOM_POOL = vendor_specific_param['shared-headroom-pool'] if 'extra_overhead' in vendor_specific_param: TESTPARAM_EXTRA_OVERHEAD = vendor_specific_param['extra_overhead'] if 'admin-down' in vendor_specific_param: TESTPARAM_ADMIN_DOWN = vendor_specific_param['admin-down'] if 'max_speed_8lane_platform' in vendor_specific_param: MAX_SPEED_8LANE_PORT = vendor_specific_param['max_speed_8lane_platform'].get(duthost.facts['platform']) if TESTPARAM_ADMIN_DOWN is not None: # For ingress profile list, we need to check whether the ingress lossy profile exists ingress_lossy_pool = duthost.shell('redis-cli -n 4 keys ""BUFFER_POOL|ingress_lossy_pool""')['stdout'] if ingress_lossy_pool: ingress_profile_list = TESTPARAM_ADMIN_DOWN.get('BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE') ingress_profile_list.append('[BUFFER_PROFILE_TABLE:ingress_lossy_zero_profile]') # 'admin-down' section contains references to buffer profiles # We need to convert the format of the references according to whether table name should be in the reference if not check_qos_db_fv_reference_with_table(duthost): expected_pgs = TESTPARAM_ADMIN_DOWN.get('BUFFER_PG_TABLE') if expected_pgs: new_pgs = {} for pg, profile in expected_pgs.items(): new_pgs[pg] = profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '') TESTPARAM_ADMIN_DOWN['BUFFER_PG_TABLE'] = new_pgs expected_queues = TESTPARAM_ADMIN_DOWN.get('BUFFER_QUEUE_TABLE') if expected_queues: new_queues = {} for queue, profile in expected_queues.items(): new_queues[queue] = profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '') TESTPARAM_ADMIN_DOWN['BUFFER_QUEUE_TABLE'] = new_queues expected_ingress_profile_list = TESTPARAM_ADMIN_DOWN.get('BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE') if expected_ingress_profile_list: new_list = [] for profile in expected_ingress_profile_list: new_list.append(profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '')) TESTPARAM_ADMIN_DOWN['BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE'] = new_list expected_egress_profile_list = TESTPARAM_ADMIN_DOWN.get('BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE') if expected_egress_profile_list: new_list = [] for profile in expected_egress_profile_list: new_list.append(profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '')) TESTPARAM_ADMIN_DOWN['BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE'] = new_list ","def load_test_parameters(duthost): """"""Load test parameters from the json file. 
Called only once when the module is initialized Args: duthost: The DUT host object """""" global DEFAULT_CABLE_LENGTH_LIST global TESTPARAM_HEADROOM_OVERRIDE global TESTPARAM_LOSSLESS_PG global TESTPARAM_SHARED_HEADROOM_POOL global TESTPARAM_EXTRA_OVERHEAD global TESTPARAM_ADMIN_DOWN global ASIC_TYPE global MAX_SPEED_8LANE_PORT param_file_name = ""qos/files/dynamic_buffer_param.json"" with open(param_file_name) as file: params = json.load(file) logging.info(""Loaded test parameters {} from {}"".format(params, param_file_name)) ASIC_TYPE = duthost.facts['asic_type'] vendor_specific_param = params[ASIC_TYPE] DEFAULT_CABLE_LENGTH_LIST = vendor_specific_param.get('default_cable_length') if 'headroom-override' in vendor_specific_param: TESTPARAM_HEADROOM_OVERRIDE = vendor_specific_param['headroom-override'] if 'lossless_pg' in vendor_specific_param: TESTPARAM_LOSSLESS_PG = vendor_specific_param['lossless_pg'] if 'shared-headroom-pool' in vendor_specific_param: TESTPARAM_SHARED_HEADROOM_POOL = vendor_specific_param['shared-headroom-pool'] if 'extra_overhead' in vendor_specific_param: TESTPARAM_EXTRA_OVERHEAD = vendor_specific_param['extra_overhead'] if 'admin-down' in vendor_specific_param: TESTPARAM_ADMIN_DOWN = vendor_specific_param['admin-down'] if 'max_speed_8lane_platform' in vendor_specific_param: MAX_SPEED_8LANE_PORT = vendor_specific_param['max_speed_8lane_platform'].get(duthost.facts['platform']) if TESTPARAM_ADMIN_DOWN is not None: # For ingress profile list, we need to check whether the ingress lossy profile exists ingress_lossy_pool = duthost.shell('redis-cli -n 4 keys ""BUFFER_POOL|ingress_lossy_pool""')['stdout'] if ingress_lossy_pool: ingress_profile_list = TESTPARAM_ADMIN_DOWN.get('BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE') ingress_profile_list.append('[BUFFER_PROFILE_TABLE:ingress_lossy_zero_profile]') # 'admin-down' section contains references to buffer profiles # We need to convert the format of the references according to whether table name should be in the reference if not check_qos_db_fv_reference_with_table(duthost): expected_pgs = TESTPARAM_ADMIN_DOWN.get('BUFFER_PG_TABLE') if expected_pgs: new_pgs = {} for pg, profile in expected_pgs.items(): new_pgs[pg] = profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '') TESTPARAM_ADMIN_DOWN['BUFFER_PG_TABLE'] = new_pgs expected_queues = TESTPARAM_ADMIN_DOWN.get('BUFFER_QUEUE_TABLE') if expected_queues: new_queues = {} for queue, profile in expected_queues.items(): new_queues[queue] = profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '') TESTPARAM_ADMIN_DOWN['BUFFER_QUEUE_TABLE'] = new_queues expected_ingress_profile_list = TESTPARAM_ADMIN_DOWN.get('BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE') if expected_ingress_profile_list: new_list = [] for profile in expected_ingress_profile_list: new_list.append(profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '')) TESTPARAM_ADMIN_DOWN['BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE'] = new_list expected_egress_profile_list = TESTPARAM_ADMIN_DOWN.get('BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE') if expected_egress_profile_list: new_list = [] for profile in expected_egress_profile_list: new_list.append(profile.replace('[BUFFER_PROFILE_TABLE:', '').replace(']', '')) TESTPARAM_ADMIN_DOWN['BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE'] = new_list " 48513,"def get_parfor_reductions(func_ir, parfor, parfor_params, calltypes, reductions=None, reduce_varnames=None, param_uses=None, param_nodes=None, var_to_param=None): """"""find variables that are updated using their previous values and an array 
item accessed with parfor index, e.g. s = s+A[i] """""" if reductions is None: reductions = {} if reduce_varnames is None: reduce_varnames = [] # for each param variable, find what other variables are used to update it # also, keep the related nodes if param_uses is None: param_uses = defaultdict(list) if param_nodes is None: param_nodes = defaultdict(list) if var_to_param is None: var_to_param = {} blocks = wrap_parfor_blocks(parfor) topo_order = find_topo_order(blocks) topo_order = topo_order[1:] # ignore init block unwrap_parfor_blocks(parfor) for label in reversed(topo_order): for stmt in reversed(parfor.loop_body[label].body): if (isinstance(stmt, ir.Assign) and (stmt.target.name in parfor_params or stmt.target.name in var_to_param)): lhs = stmt.target rhs = stmt.value cur_param = lhs if lhs.name in parfor_params else var_to_param[lhs.name] used_vars = [] if isinstance(rhs, ir.Var): used_vars = [rhs.name] elif isinstance(rhs, ir.Expr): used_vars = [v.name for v in stmt.value.list_vars()] param_uses[cur_param].extend(used_vars) for v in used_vars: var_to_param[v] = cur_param # save copy of dependent stmt stmt_cp = copy.deepcopy(stmt) if stmt.value in calltypes: calltypes[stmt_cp.value] = calltypes[stmt.value] param_nodes[cur_param].append(stmt_cp) if isinstance(stmt, Parfor): # recursive parfors can have reductions like test_prange8 get_parfor_reductions(func_ir, stmt, parfor_params, calltypes, reductions, reduce_varnames, param_uses, param_nodes, var_to_param) for param, used_vars in param_uses.items(): # a parameter is a reduction variable if its value is used to update it # check reduce_varnames since recursive parfors might have processed # param already param_name = param.name if param_name in used_vars and param_name not in reduce_varnames: param_nodes[param].reverse() reduce_nodes = get_reduce_nodes(param, param_nodes[param], func_ir) # SSA can make things look like reductions except that they don't # reduction operators. If we get to this point but don't find a # reduction operator then assume it is SSA. if reduce_nodes is not None: reduce_varnames.append(param_name) check_conflicting_reduction_operators(param, reduce_nodes) gri_out = guard(get_reduction_init, reduce_nodes) if gri_out is not None: init_val, redop = gri_out else: init_val = None redop = None reductions[param_name] = (init_val, reduce_nodes, redop) return reduce_varnames, reductions ","def get_parfor_reductions(func_ir, parfor, parfor_params, calltypes, reductions=None, reduce_varnames=None, param_uses=None, param_nodes=None, var_to_param=None): """"""find variables that are updated using their previous values and an array item accessed with parfor index, e.g. 
s = s+A[i] """""" if reductions is None: reductions = {} if reduce_varnames is None: reduce_varnames = [] # for each param variable, find what other variables are used to update it # also, keep the related nodes if param_uses is None: param_uses = defaultdict(list) if param_nodes is None: param_nodes = defaultdict(list) if var_to_param is None: var_to_param = {} blocks = wrap_parfor_blocks(parfor) topo_order = find_topo_order(blocks) topo_order = topo_order[1:] # ignore init block unwrap_parfor_blocks(parfor) for label in reversed(topo_order): for stmt in reversed(parfor.loop_body[label].body): if (isinstance(stmt, ir.Assign) and (stmt.target.name in parfor_params or stmt.target.name in var_to_param)): lhs = stmt.target rhs = stmt.value cur_param = lhs if lhs.name in parfor_params else var_to_param[lhs.name] used_vars = [] if isinstance(rhs, ir.Var): used_vars = [rhs.name] elif isinstance(rhs, ir.Expr): used_vars = [v.name for v in stmt.value.list_vars()] param_uses[cur_param].extend(used_vars) for v in used_vars: var_to_param[v] = cur_param # save copy of dependent stmt stmt_cp = copy.deepcopy(stmt) if stmt.value in calltypes: calltypes[stmt_cp.value] = calltypes[stmt.value] param_nodes[cur_param].append(stmt_cp) if isinstance(stmt, Parfor): # recursive parfors can have reductions like test_prange8 get_parfor_reductions(func_ir, stmt, parfor_params, calltypes, reductions, reduce_varnames, param_uses, param_nodes, var_to_param) for param, used_vars in param_uses.items(): # a parameter is a reduction variable if its value is used to update it # check reduce_varnames since recursive parfors might have processed # param already param_name = param.name if param_name in used_vars and param_name not in reduce_varnames: param_nodes[param].reverse() reduce_nodes = get_reduce_nodes(param, param_nodes[param], func_ir) # SSA can make things look like reductions except that they don't # have reduction operators. If we get to this point but don't find a # reduction operator then assume it is SSA. if reduce_nodes is not None: reduce_varnames.append(param_name) check_conflicting_reduction_operators(param, reduce_nodes) gri_out = guard(get_reduction_init, reduce_nodes) if gri_out is not None: init_val, redop = gri_out else: init_val = None redop = None reductions[param_name] = (init_val, reduce_nodes, redop) return reduce_varnames, reductions " 2671,"def calibration_curve( y_true, y_prob, *, pos_label=None, normalize=""deprecated"", n_bins=5, strategy=""uniform"", ): """"""Compute true and predicted probabilities for a calibration curve. The method assumes the inputs come from a binary classifier, and discretize the [0, 1] interval into bins. Calibration curves may also be referred to as reliability diagrams. Read more in the :ref:`User Guide `. Parameters ---------- y_true : array-like of shape (n_samples,) True targets. y_prob : array-like of shape (n_samples,) Probabilities of the positive class. pos_label : int or str, default=None The label of the positive class. .. versionadded:: 1.1 normalize : bool, default=""deprecated"" Whether y_prob needs to be normalized into the [0, 1] interval, i.e. is not a proper probability. If True, the smallest value in y_prob is linearly mapped onto 0 and the largest one onto 1. .. deprecated:: 1.1 The normalize argument is deprecated in v1.1 and will be removed in v1.3. Explicitly normalizing y_prob will reproduce this behavior, but it is recommended that a proper probability is used (i.e. 
a classifier's `predict_proba` positive class or `decision_function` output calibrated with `CalibratedClassifierCV`). n_bins : int, default=5 Number of bins to discretize the [0, 1] interval. A bigger number requires more data. Bins with no samples (i.e. without corresponding values in `y_prob`) will not be returned, thus the returned arrays may have less than `n_bins` values. strategy : {'uniform', 'quantile'}, default='uniform' Strategy used to define the widths of the bins. uniform The bins have identical widths. quantile The bins have the same number of samples and depend on `y_prob`. Returns ------- prob_true : ndarray of shape (n_bins,) or smaller The proportion of samples whose class is the positive class, in each bin (fraction of positives). prob_pred : ndarray of shape (n_bins,) or smaller The mean predicted probability in each bin. References ---------- Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good Probabilities With Supervised Learning, in Proceedings of the 22nd International Conference on Machine Learning (ICML). See section 4 (Qualitative Analysis of Predictions). Examples -------- >>> import numpy as np >>> from sklearn.calibration import calibration_curve >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1]) >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.]) >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3) >>> prob_true array([0. , 0.5, 1. ]) >>> prob_pred array([0.2 , 0.525, 0.85 ]) """""" y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) check_consistent_length(y_true, y_prob) pos_label = _check_pos_label_consistency(pos_label, y_true) # TODO(1.3): Remove normalize conditional block. if normalize != ""deprecated"": warnings.warn( ""The normalize argument is deprecated in v1.1 and will be removed in v1.3."" "" Explicitly normalizing y_prob will reproduce this behavior, but it is"" "" recommended that a proper probability is used (i.e. a classifier's"" "" `predict_proba` positive class or `decision_function` output calibrated"" "" with `CalibratedClassifierCV`)."", FutureWarning, ) if normalize: # Normalize predicted values into interval [0, 1] y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min()) if y_prob.min() < 0 or y_prob.max() > 1: raise ValueError(""y_prob has values outside [0, 1]."") labels = np.unique(y_true) if len(labels) > 2: raise ValueError( f""Only binary classification is supported. Provided labels {labels}."" ) y_true = y_true == pos_label if strategy == ""quantile"": # Determine bin edges by distribution of data quantiles = np.linspace(0, 1, n_bins + 1) bins = np.percentile(y_prob, quantiles * 100) elif strategy == ""uniform"": bins = np.linspace(0.0, 1.0, n_bins + 1) else: raise ValueError( ""Invalid entry to 'strategy' input. Strategy "" ""must be either 'quantile' or 'uniform'."" ) binids = np.searchsorted(bins[1:-1], y_prob) bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) bin_total = np.bincount(binids, minlength=len(bins)) nonzero = bin_total != 0 prob_true = bin_true[nonzero] / bin_total[nonzero] prob_pred = bin_sums[nonzero] / bin_total[nonzero] return prob_true, prob_pred ","def calibration_curve( y_true, y_prob, *, pos_label=None, normalize=""deprecated"", n_bins=5, strategy=""uniform"", ): """"""Compute true and predicted probabilities for a calibration curve. The method assumes the inputs come from a binary classifier, and discretize the [0, 1] interval into bins. 
Calibration curves may also be referred to as reliability diagrams. Read more in the :ref:`User Guide `. Parameters ---------- y_true : array-like of shape (n_samples,) True targets. y_prob : array-like of shape (n_samples,) Probabilities of the positive class. pos_label : int or str, default=None The label of the positive class. .. versionadded:: 1.1 normalize : bool, default=""deprecated"" Whether y_prob needs to be normalized into the [0, 1] interval, i.e. is not a proper probability. If True, the smallest value in y_prob is linearly mapped onto 0 and the largest one onto 1. .. deprecated:: 1.1 The normalize argument is deprecated in v1.1 and will be removed in v1.3. Explicitly normalizing `y_prob` will reproduce this behavior, but it is recommended that a proper probability is used (i.e. a classifier's `predict_proba` positive class or `decision_function` output calibrated with `CalibratedClassifierCV`). n_bins : int, default=5 Number of bins to discretize the [0, 1] interval. A bigger number requires more data. Bins with no samples (i.e. without corresponding values in `y_prob`) will not be returned, thus the returned arrays may have less than `n_bins` values. strategy : {'uniform', 'quantile'}, default='uniform' Strategy used to define the widths of the bins. uniform The bins have identical widths. quantile The bins have the same number of samples and depend on `y_prob`. Returns ------- prob_true : ndarray of shape (n_bins,) or smaller The proportion of samples whose class is the positive class, in each bin (fraction of positives). prob_pred : ndarray of shape (n_bins,) or smaller The mean predicted probability in each bin. References ---------- Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good Probabilities With Supervised Learning, in Proceedings of the 22nd International Conference on Machine Learning (ICML). See section 4 (Qualitative Analysis of Predictions). Examples -------- >>> import numpy as np >>> from sklearn.calibration import calibration_curve >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1]) >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.]) >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3) >>> prob_true array([0. , 0.5, 1. ]) >>> prob_pred array([0.2 , 0.525, 0.85 ]) """""" y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) check_consistent_length(y_true, y_prob) pos_label = _check_pos_label_consistency(pos_label, y_true) # TODO(1.3): Remove normalize conditional block. if normalize != ""deprecated"": warnings.warn( ""The normalize argument is deprecated in v1.1 and will be removed in v1.3."" "" Explicitly normalizing y_prob will reproduce this behavior, but it is"" "" recommended that a proper probability is used (i.e. a classifier's"" "" `predict_proba` positive class or `decision_function` output calibrated"" "" with `CalibratedClassifierCV`)."", FutureWarning, ) if normalize: # Normalize predicted values into interval [0, 1] y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min()) if y_prob.min() < 0 or y_prob.max() > 1: raise ValueError(""y_prob has values outside [0, 1]."") labels = np.unique(y_true) if len(labels) > 2: raise ValueError( f""Only binary classification is supported. 
Provided labels {labels}."" ) y_true = y_true == pos_label if strategy == ""quantile"": # Determine bin edges by distribution of data quantiles = np.linspace(0, 1, n_bins + 1) bins = np.percentile(y_prob, quantiles * 100) elif strategy == ""uniform"": bins = np.linspace(0.0, 1.0, n_bins + 1) else: raise ValueError( ""Invalid entry to 'strategy' input. Strategy "" ""must be either 'quantile' or 'uniform'."" ) binids = np.searchsorted(bins[1:-1], y_prob) bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) bin_total = np.bincount(binids, minlength=len(bins)) nonzero = bin_total != 0 prob_true = bin_true[nonzero] / bin_total[nonzero] prob_pred = bin_sums[nonzero] / bin_total[nonzero] return prob_true, prob_pred " 5760,"def tf2sos(b, a, pairing=None, analog=False): """""" Return second-order sections from transfer function representation Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional The method to use to combine pairs of poles and zeros into sections. See `zpk2sos` for information and restrictions on `pairing` and `analog` arguments. analog : bool, optional If True, system is analog, otherwise discrete. .. versionadded:: 1.8.0 Returns ------- sos : ndarray Array of second-order filter coefficients, with shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. See Also -------- zpk2sos, sosfilt Notes ----- It is generally discouraged to convert from TF to SOS format, since doing so usually will not improve numerical precision errors. Instead, consider designing filters in ZPK format and converting directly to SOS. TF is converted to SOS by first converting to ZPK format, then converting ZPK to SOS. .. versionadded:: 0.16.0 """""" return zpk2sos(*tf2zpk(b, a), pairing=pairing, analog=analog) ","def tf2sos(b, a, pairing=None, *, analog=False): """""" Return second-order sections from transfer function representation Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional The method to use to combine pairs of poles and zeros into sections. See `zpk2sos` for information and restrictions on `pairing` and `analog` arguments. analog : bool, optional If True, system is analog, otherwise discrete. .. versionadded:: 1.8.0 Returns ------- sos : ndarray Array of second-order filter coefficients, with shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. See Also -------- zpk2sos, sosfilt Notes ----- It is generally discouraged to convert from TF to SOS format, since doing so usually will not improve numerical precision errors. Instead, consider designing filters in ZPK format and converting directly to SOS. TF is converted to SOS by first converting to ZPK format, then converting ZPK to SOS. .. versionadded:: 0.16.0 """""" return zpk2sos(*tf2zpk(b, a), pairing=pairing, analog=analog) " 50077,"def countstat_current(L, c_ops=None, rhoss=None, J_ops=None): """""" Calculate the current corresponding a system Liouvillian `L` and a list of current collapse operators `c_ops` or current superoperators `J_ops` (either must be specified). Optionally the steadystate density matrix `rhoss` and a list of current superoperators `J_ops` can be specified. If either of these are omitted they are computed internally. 
Parameters ---------- L : :class:`qutip.Qobj` Qobj representing the system Liouvillian. c_ops : array / list (optional) List of current collapse operators. rhoss : :class:`qutip.Qobj` (optional) The steadystate density matrix corresponding the system Liouvillian `L`. J_ops : array / list (optional) List of current superoperators. Returns -------- I : array The currents `I` corresponding to each current collapse operator `c_ops` (or, equivalently, each current superopeator `J_ops`). """""" if J_ops is None: if c_ops is None: raise ValueError(""c_ops must be given if J_ops is not"") J_ops = [sprepost(c, c.dag()) for c in c_ops] if rhoss is None: if c_ops is None: raise ValueError(""c_ops must be given if rhoss is not"") rhoss = steadystate(L, c_ops) rhoss_vec = _data.column_stack(rhoss.data.copy()) N = len(J_ops) current = np.zeros(N) for i, Ji in enumerate(J_ops): current[i] = _data.expect_super(Ji.data, rhoss_vec).real return current ","def countstat_current(L, c_ops=None, rhoss=None, J_ops=None): """""" Calculate the current corresponding to a system Liouvillian ``L`` and a list of current collapse operators ``c_ops`` or current superoperators ``J_ops``. Parameters ---------- L : :class:`qutip.Qobj` Qobj representing the system Liouvillian. c_ops : array / list (optional) List of current collapse operators. Required if either ``rhoss`` or ``J_ops`` is not given. rhoss : :class:`qutip.Qobj` (optional) The steadystate density matrix for the given system Liouvillian ``L`` and collapse operators. If not given, it defaults to ``steadystate(L, c_ops)``. J_ops : array / list (optional) List of current superoperators. If not given, they default to ``sprepost(c, c.dag())`` for each ``c`` from ``c_ops``. Returns -------- I : array The currents ``I`` corresponding to each current collapse operator ``J_ops`` (or to each ``c_ops`` if ``J_ops`` was not given). """""" if J_ops is None: if c_ops is None: raise ValueError(""c_ops must be given if J_ops is not"") J_ops = [sprepost(c, c.dag()) for c in c_ops] if rhoss is None: if c_ops is None: raise ValueError(""c_ops must be given if rhoss is not"") rhoss = steadystate(L, c_ops) rhoss_vec = _data.column_stack(rhoss.data.copy()) N = len(J_ops) current = np.zeros(N) for i, Ji in enumerate(J_ops): current[i] = _data.expect_super(Ji.data, rhoss_vec).real return current " 27596,"def init(): init_logging(debug=literal_eval(os.environ.get('WR_DEBUG', 'True'))) config = load_wr_config() wr = WebRecRecorder(config) spawn_once(wr.msg_listen_loop) wr.init_app() wr.app.wr = wr return wr.app ","def init(): init_logging(debug=get_bool(os.environ.get('WR_DEBUG', 'True'))) config = load_wr_config() wr = WebRecRecorder(config) spawn_once(wr.msg_listen_loop) wr.init_app() wr.app.wr = wr return wr.app " 28552,"def generate_dims_coords( shape, var_name, dims=None, coords=None, default_dims=None, index_origin=None ): """"""Generate default dimensions and coordinates for a variable. Parameters ---------- shape : tuple[int] Shape of the variable var_name : str Name of the variable. If no dimension name(s) is provided, ArviZ will generate a default dimension name using ``var_name``, e.g., ``""foo_dim_0""`` for the first dimension if ``var_name`` is ``""foo""``. dims : list List of dimensions for the variable coords : dict[str] -> list[str] Map of dimensions to coordinates default_dims : list[str] Dimension names that are not part of the variable's shape. 
For example, when manipulating Monte Carlo traces, the ``default_dims`` would be ``[""chain"" , ""draw""]`` which ArviZ uses as its own names for dimensions of MCMC traces. index_origin : int, optional Starting value of integer coordinate values. Defaults to the value in rcParam ``data.index_origin``. Returns ------- list[str] Default dims dict[str] -> list[str] Default coords """""" if index_origin is None: index_origin = rcParams[""data.index_origin""] if default_dims is None: default_dims = [] if dims is None: dims = [] if len([dim for dim in dims if dim not in default_dims]) > len(shape): warnings.warn( ( ""In variable {var_name}, there are "" + ""more dims ({dims_len}) given than exist ({shape_len}). "" + ""Passed array should have shape ({defaults}*shape)"" ).format( var_name=var_name, dims_len=len(dims), shape_len=len(shape), defaults="","".join(default_dims) + "", "" if default_dims is not None else """", ), UserWarning, ) if coords is None: coords = {} coords = deepcopy(coords) dims = deepcopy(dims) for idx, dim_len in enumerate(shape): if (len(dims) < idx + 1) or (dims[idx] is None): dim_name = ""{var_name}_dim_{idx}"".format(var_name=var_name, idx=idx) if len(dims) < idx + 1: dims.append(dim_name) else: dims[idx] = dim_name dim_name = dims[idx] if dim_name not in coords: coords[dim_name] = utils.arange(dim_len) + index_origin coords = {key: coord for key, coord in coords.items() if any(key == dim for dim in dims)} return dims, coords ","def generate_dims_coords( shape, var_name, dims=None, coords=None, default_dims=None, index_origin=None ): """"""Generate default dimensions and coordinates for a variable. Parameters ---------- shape : tuple[int] Shape of the variable var_name : str Name of the variable. If no dimension name(s) is provided, ArviZ will generate a default dimension name using ``var_name``, e.g., ``""foo_dim_0""`` for the first dimension if ``var_name`` is ``""foo""``. dims : list List of dimensions for the variable coords : dict[str] -> list[str] Map of dimensions to coordinates default_dims : list[str] Dimension names that are not part of the variable's shape. For example, when manipulating Monte Carlo traces, the ``default_dims`` would be ``[""chain"" , ""draw""]`` which ArviZ uses as its own names for dimensions of MCMC traces. index_origin : int, optional Starting value of integer coordinate values. Defaults to the value in rcParam ``data.index_origin``. Returns ------- list[str] Default dims dict[str] -> list[str] Default coords """""" if index_origin is None: index_origin = rcParams[""data.index_origin""] if default_dims is None: default_dims = [] if dims is None: dims = [] if len([dim for dim in dims if dim not in default_dims]) > len(shape): warnings.warn( ( ""In variable {var_name}, there are "" + ""more dims ({dims_len}) given than exist ({shape_len}). 
"" + ""Passed array should have shape ({defaults}*shape)"" ).format( var_name=var_name, dims_len=len(dims), shape_len=len(shape), defaults="","".join(default_dims) + "", "" if default_dims is not None else """", ), UserWarning, ) if coords is None: coords = {} coords = deepcopy(coords) dims = deepcopy(dims) for idx, dim_len in enumerate(shape): if (len(dims) < idx + 1) or (dims[idx] is None): dim_name = ""{var_name}_dim_{idx}"".format(var_name=var_name, idx=idx) if len(dims) < idx + 1: dims.append(dim_name) else: dims[idx] = dim_name dim_name = dims[idx] if dim_name not in coords: coords[dim_name] = utils.arange(index_origin, dim_len+index_origin) coords = {key: coord for key, coord in coords.items() if any(key == dim for dim in dims)} return dims, coords " 39123,"def rotate_service_account_keys_in_secret_manager(event, context): event_type = event[""attributes""][""eventType""] secret_name = event[""attributes""][""secretId""] if (event_type == ""SECRET_ROTATE""): client = secretmanager.SecretManagerServiceClient() current_secret_with_version = client.list_secret_versions( request={ ""parent"": secret_name }).versions[0].name current_secret = client.access_secret_version( name=current_secret_with_version) ### Get project_id, service account and private key id json_key = json.loads(current_secret.payload.data.decode('UTF-8')) project_id = json_key['project_id'] service_account = json_key['client_email'] key_id = json_key['private_key_id'] service = googleapiclient.discovery.build('iam', 'v1') ### Create a new service account and add a key as a new secret version new_key = service.projects().serviceAccounts().keys().create( name='projects/' + project_id + '/serviceAccounts/' + service_account, body={}).execute() new_service_key_json = base64.b64decode(new_key['privateKeyData']) client.add_secret_version(parent=secret_name, payload={'data': new_service_key_json}) ### Delete the old service account key_to_delete = ""projects/{project_id}/serviceAccounts/{service_account}/keys/{key_id}"".format( project_id=project_id, service_account=service_account, key_id=key_id) service.projects().serviceAccounts().keys().delete( name=key_to_delete).execute() ### Disable and delete the old secret version client.disable_secret_version( request={""name"": current_secret_with_version}) client.destroy_secret_version( request={""name"": current_secret_with_version}) ","def rotate_service_account_keys_in_secret_manager(event, context): event_type = event[""attributes""][""eventType""] secret_name = event[""attributes""][""secretId""] if (event_type == ""SECRET_ROTATE""): client = secretmanager.SecretManagerServiceClient() current_secret_with_version = client.list_secret_versions( request={ ""parent"": secret_name }).versions[0].name current_secret = client.access_secret_version( name=current_secret_with_version) ### Get project_id, service account and private key id json_key = json.loads(current_secret.payload.data.decode('UTF-8')) project_id = json_key['project_id'] service_account = json_key['client_email'] key_id = json_key['private_key_id'] service = googleapiclient.discovery.build('iam', 'v1') ### Create a new service account and add a key as a new secret version new_key = service.projects().serviceAccounts().keys().create( name=f'projects/{project_id}/serviceAccounts/{service_account}', body={}).execute() new_service_key_json = base64.b64decode(new_key['privateKeyData']) client.add_secret_version(parent=secret_name, payload={'data': new_service_key_json}) ### Delete the old service account key_to_delete = 
""projects/{project_id}/serviceAccounts/{service_account}/keys/{key_id}"".format( project_id=project_id, service_account=service_account, key_id=key_id) service.projects().serviceAccounts().keys().delete( name=key_to_delete).execute() ### Disable and delete the old secret version client.disable_secret_version( request={""name"": current_secret_with_version}) client.destroy_secret_version( request={""name"": current_secret_with_version}) " 43380,"def CVNeuralNet(weights, wires=None): """"""pennylane.template.CVNeuralNet(weights, wires) A CV Quantum Neural Network Implements the CV Quantum Neural Network (CVQNN) architecture from :cite:`killoran2018continuous` for an arbitrary number of wires and layers. The weights parameter is nested list. Its first dimension is equal to the number of layers. Each entry is again a list that contains the parameters feeding into :func:`CVNeuralNetLayer`. Args: weights (array[array]): array of arrays of weights for each layer of the CV neural network Keyword Args: wires (Sequence[int]): wires the CVQNN should act on """""" for layer_weights in weights: CVNeuralNetLayer(*layer_weights, wires=wires) ","def CVNeuralNet(weights, wires=None): """"""pennylane.template.CVNeuralNet(weights, wires) A CV Quantum Neural Network Implements the CV Quantum Neural Network (CVQNN) architecture from :cite:`killoran2018continuous` for an arbitrary number of wires and layers. The weights parameter is a nested list. Its first dimension is equal to the number of layers. Each entry is again a list that contains the parameters feeding into :func:`CVNeuralNetLayer`. Args: weights (array[array]): array of arrays of weights for each layer of the CV neural network Keyword Args: wires (Sequence[int]): wires the CVQNN should act on """""" for layer_weights in weights: CVNeuralNetLayer(*layer_weights, wires=wires) " 36356,"def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, module=None, qualname=None): """"""Return a new dynamically created dataclass. The dataclass name will be 'cls_name'. 'fields' is an iterable of either (name), (name, type) or (name, type, Field) objects. If type is omitted, use the string 'typing.Any'. Field objects are created by the equivalent of calling 'field(name, type [, Field-info])'. C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,)) is equivalent to: @dataclass class C(Base): x: 'typing.Any' y: int z: int = field(init=False) For the bases and namespace parameters, see the builtin type() function. 'module' should be set to the module this class is being created in; if it is not set, an attempt to find that module will be made, but if it fails the class will not be picklable. 'qualname' should be set to the actual location this call can be found in its module; by default it is set to the global scope. If this is not correct, pickle will fail in some circumstances. The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to dataclass(). """""" if namespace is None: namespace = {} else: # Copy namespace since we're going to mutate it. namespace = namespace.copy() # While we're looking through the field names, validate that they # are identifiers, are not keywords, and not duplicates. 
seen = set() anns = {} for item in fields: if isinstance(item, str): name = item tp = 'typing.Any' elif len(item) == 2: name, tp, = item elif len(item) == 3: name, tp, spec = item namespace[name] = spec else: raise TypeError(f'Invalid field: {item!r}') if not isinstance(name, str) or not name.isidentifier(): raise TypeError(f'Field names must be valid identifers: {name!r}') if keyword.iskeyword(name): raise TypeError(f'Field names must not be keywords: {name!r}') if name in seen: raise TypeError(f'Field name duplicated: {name!r}') seen.add(name) anns[name] = tp namespace['__annotations__'] = anns # We use `types.new_class()` instead of simply `type()` to allow dynamic creation # of generic dataclassses. cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace)) # TODO: this hack is the same that can be found in enum.py and should be # removed if there ever is a way to get the caller module. if module is None: try: module = sys._getframe(1).f_globals['__name__'] except (AttributeError, ValueError): pass if module is None: _make_class_unpicklable(cls) else: cls.__module__ = module if qualname is not None: cls.__qualname__ = qualname return dataclass(cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen) ","def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, module=None, qualname=None): """"""Return a new dynamically created dataclass. The dataclass name will be 'cls_name'. 'fields' is an iterable of either (name), (name, type) or (name, type, Field) objects. If type is omitted, use the string 'typing.Any'. Field objects are created by the equivalent of calling 'field(name, type [, Field-info])'. C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,)) is equivalent to: @dataclass class C(Base): x: 'typing.Any' y: int z: int = field(init=False) For the bases and namespace parameters, see the built-in type() function. 'module' should be set to the module this class is being created in; if it is not set, an attempt to find that module will be made, but if it fails the class will not be picklable. 'qualname' should be set to the actual location this call can be found in its module; by default it is set to the global scope. If this is not correct, pickle will fail in some circumstances. The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to dataclass(). """""" if namespace is None: namespace = {} else: # Copy namespace since we're going to mutate it. namespace = namespace.copy() # While we're looking through the field names, validate that they # are identifiers, are not keywords, and not duplicates. seen = set() anns = {} for item in fields: if isinstance(item, str): name = item tp = 'typing.Any' elif len(item) == 2: name, tp, = item elif len(item) == 3: name, tp, spec = item namespace[name] = spec else: raise TypeError(f'Invalid field: {item!r}') if not isinstance(name, str) or not name.isidentifier(): raise TypeError(f'Field names must be valid identifers: {name!r}') if keyword.iskeyword(name): raise TypeError(f'Field names must not be keywords: {name!r}') if name in seen: raise TypeError(f'Field name duplicated: {name!r}') seen.add(name) anns[name] = tp namespace['__annotations__'] = anns # We use `types.new_class()` instead of simply `type()` to allow dynamic creation # of generic dataclassses. 
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace)) # TODO: this hack is the same that can be found in enum.py and should be # removed if there ever is a way to get the caller module. if module is None: try: module = sys._getframe(1).f_globals['__name__'] except (AttributeError, ValueError): pass if module is None: _make_class_unpicklable(cls) else: cls.__module__ = module if qualname is not None: cls.__qualname__ = qualname return dataclass(cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen) " 42904,"def random_symplectic(N, passive=False, block_diag=False): r""""""Random symplectic matrix representing a Gaussian transformation. The squeezing parameters :math:`r` for active transformations are randomly sampled from the standard normal distribution, while passive transformations are randomly sampled from the Haar measure. Args: N (int): number of modes passive (bool): if True, returns a passive Gaussian transformation (i.e., one that preserves photon number). If False (default), returns an active transformation. block_diag (bool): if True, uses passive Gaussian transformations that are orthogonal instead of unitary. This implies that the positions :math:`q` do not mix with the momenta :math:`p` and thus the symplectic operator is block diagonal Returns: array: random :math:`2N\times 2N` symplectic matrix """""" U = random_interferometer(N, real=block_diag) O = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])]) if passive: return O U = random_interferometer(N, real=block_diag) P = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])]) r = np.abs(randnc(N)) Sq = np.diag(np.concatenate([np.exp(-r), np.exp(r)])) return O @ Sq @ P ","def random_symplectic(N, passive=False, block_diag=False): r""""""Random symplectic matrix representing a Gaussian transformation. The squeezing parameters :math:`r` for active transformations are randomly sampled from the standard normal distribution, while passive transformations are randomly sampled from the Haar measure. Args: N (int): number of modes passive (bool): if True, returns a passive Gaussian transformation (i.e., one that preserves photon number). If False (default), returns an active transformation. block_diag (bool): if True, uses passive Gaussian transformations that are orthogonal instead of unitary. This implies that the positions :math:`q` do not mix with the momenta :math:`p` and thus the symplectic operator is block diagonal. 
Returns: array: random :math:`2N\times 2N` symplectic matrix """""" U = random_interferometer(N, real=block_diag) O = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])]) if passive: return O U = random_interferometer(N, real=block_diag) P = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])]) r = np.abs(randnc(N)) Sq = np.diag(np.concatenate([np.exp(-r), np.exp(r)])) return O @ Sq @ P " 39546,"def process_link(link) -> List[AdvisoryData]: json_response = requests.get(link).content record = json.loads(json_response) advisories: List[AdvisoryData] = [] if record[""packages""] is None: return advisories for package in record[""packages""]: advisories.extend( load_advisories( package[""pkg""], record[""distroversion""], record[""reponame""], record[""archs""], ) ) return advisories ","def fetch_advisory_links(): BASE_URL = ""https://secdb.alpinelinux.org/"" index_page = BeautifulSoup(requests.get(BASE_URL).content, features=""lxml"") alpine_versions = [link.text for link in index_page.find_all(""a"") if link.text.startswith(""v"")] advisory_directory_links = [f""{BASE_URL}{version}"" for version in alpine_versions] advisory_links = [] for advisory_directory_link in advisory_directory_links: advisory_directory_page = requests.get(advisory_directory_link).content advisory_directory_page = BeautifulSoup(advisory_directory_page, features=""lxml"") advisory_links.extend( [ f""{advisory_directory_link}{anchore_tag.text}"" for anchore_tag in advisory_directory_page.find_all(""a"") if anchore_tag.text.endswith(""json"") ] ) return advisory_links json_response = requests.get(link).json json_response = requests.get(link).content record = json.loads(json_response) advisories: List[AdvisoryData] = [] if record[""packages""] is None: return advisories for package in record[""packages""]: advisories.extend( load_advisories( package[""pkg""], record[""distroversion""], record[""reponame""], record[""archs""], ) ) return advisories " 50619,"def cwt(data, scales, wavelet, sampling_period=1., method='conv', precision=12, axis=-1): """""" cwt(data, scales, wavelet) One dimensional Continuous Wavelet Transform. Parameters ---------- data : array_like Input signal scales : array_like The wavelet scales to use. One can use ``f = scale2frequency(wavelet, scale)/sampling_period`` to determine what physical frequency, ``f``. Here, ``f`` is in hertz when the ``sampling_period`` is given in seconds. wavelet : Wavelet object or name Wavelet to use sampling_period : float Sampling period for the frequencies output (optional). The values computed for ``coefs`` are independent of the choice of ``sampling_period`` (i.e. ``scales`` is not scaled by the sampling period). method : {'conv', 'fft'}, optional The method used to compute the CWT. Can be any of: - ``conv`` uses ``numpy.convolve``. - ``fft`` uses frequency domain convolution. - ``auto`` uses automatic selection based on an estimate of the computational complexity at each scale. The ``conv`` method complexity is ``O(len(scale) * len(data))``. The ``fft`` method is ``O(N * log2(N))`` with ``N = len(scale) + len(data) - 1``. It is well suited for large size signals but slightly slower than ``conv`` on small ones. precision: int, optional Length of wavelet (2 ** precision) used to compute the CWT. Greater will increase resolution, especially for higher scales, but will compute a bit slower. Too low will distort coefficients and their norms, with a zipper-like effect; recommended >= 12. 
axis: int, optional Axis over which to compute the CWT. If not given, the last axis is used. Returns ------- coefs : array_like Continuous wavelet transform of the input signal for the given scales and wavelet. The first axis of ``coefs`` corresponds to the scales. The remaining axes match the shape of ``data``. frequencies : array_like If the unit of sampling period are seconds and given, than frequencies are in hertz. Otherwise, a sampling period of 1 is assumed. Notes ----- Size of coefficients arrays depends on the length of the input array and the length of given scales. Examples -------- >>> import pywt >>> import numpy as np >>> import matplotlib.pyplot as plt >>> x = np.arange(512) >>> y = np.sin(2*np.pi*x/32) >>> coef, freqs=pywt.cwt(y,np.arange(1,129),'gaus1') >>> plt.matshow(coef) # doctest: +SKIP >>> plt.show() # doctest: +SKIP ---------- >>> import pywt >>> import numpy as np >>> import matplotlib.pyplot as plt >>> t = np.linspace(-1, 1, 200, endpoint=False) >>> sig = np.cos(2 * np.pi * 7 * t) + np.real(np.exp(-7*(t-0.4)**2)*np.exp(1j*2*np.pi*2*(t-0.4))) >>> widths = np.arange(1, 31) >>> cwtmatr, freqs = pywt.cwt(sig, widths, 'mexh') >>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto', ... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) # doctest: +SKIP >>> plt.show() # doctest: +SKIP """""" # accept array_like input; make a copy to ensure a contiguous array dt = _check_dtype(data) data = np.asarray(data, dtype=dt) dt_cplx = np.result_type(dt, np.complex64) if not isinstance(wavelet, (ContinuousWavelet, Wavelet)): wavelet = DiscreteContinuousWavelet(wavelet) if np.isscalar(scales): scales = np.array([scales]) if not np.isscalar(axis): raise ValueError(""axis must be a scalar."") dt_out = dt_cplx if wavelet.complex_cwt else dt out = np.empty((np.size(scales),) + data.shape, dtype=dt_out) int_psi, x = integrate_wavelet(wavelet, precision=precision) int_psi = np.conj(int_psi) if wavelet.complex_cwt else int_psi # convert int_psi, x to the same precision as the data dt_psi = dt_cplx if int_psi.dtype.kind == 'c' else dt int_psi = np.asarray(int_psi, dtype=dt_psi) x = np.asarray(x, dtype=data.real.dtype) if method == 'fft': size_scale0 = -1 fft_data = None elif not method == 'conv': raise ValueError(""method must be 'conv' or 'fft'"") if data.ndim > 1: # move axis to be transformed last (so it is contiguous) data = data.swapaxes(-1, axis) # reshape to (n_batch, data.shape[-1]) data_shape_pre = data.shape data = data.reshape((-1, data.shape[-1])) for i, scale in enumerate(scales): step = x[1] - x[0] j = np.arange(scale * (x[-1] - x[0]) + 1) / (scale * step) j = j.astype(int) # floor if j[-1] >= int_psi.size: j = np.extract(j < int_psi.size, j) int_psi_scale = int_psi[j][::-1] if method == 'conv': if data.ndim == 1: conv = np.convolve(data, int_psi_scale) else: # batch convolution via loop conv_shape = list(data.shape) conv_shape[-1] += int_psi_scale.size - 1 conv_shape = tuple(conv_shape) conv = np.empty(conv_shape, dtype=dt_out) for n in range(data.shape[0]): conv[n, :] = np.convolve(data[n], int_psi_scale) else: # The padding is selected for: # - optimal FFT complexity # - to be larger than the two signals length to avoid circular # convolution size_scale = next_fast_len( data.shape[-1] + int_psi_scale.size - 1 ) if size_scale != size_scale0: # Must recompute fft_data when the padding size changes. 
fft_data = fftmodule.fft(data, size_scale, axis=-1) size_scale0 = size_scale fft_wav = fftmodule.fft(int_psi_scale, size_scale, axis=-1) conv = fftmodule.ifft(fft_wav * fft_data, axis=-1) conv = conv[..., :data.shape[-1] + int_psi_scale.size - 1] coef = - np.sqrt(scale) * np.diff(conv, axis=-1) if out.dtype.kind != 'c': coef = coef.real # transform axis is always -1 due to the data reshape above d = (coef.shape[-1] - data.shape[-1]) / 2. if d > 0: coef = coef[..., floor(d):-ceil(d)] elif d < 0: raise ValueError( ""Selected scale of {} too small."".format(scale)) if data.ndim > 1: # restore original data shape and axis position coef = coef.reshape(data_shape_pre) coef = coef.swapaxes(axis, -1) out[i, ...] = coef frequencies = scale2frequency(wavelet, scales, precision) if np.isscalar(frequencies): frequencies = np.array([frequencies]) frequencies /= sampling_period return out, frequencies ","def cwt(data, scales, wavelet, sampling_period=1., method='conv', axis=-1, *, precision=12): """""" cwt(data, scales, wavelet) One dimensional Continuous Wavelet Transform. Parameters ---------- data : array_like Input signal scales : array_like The wavelet scales to use. One can use ``f = scale2frequency(wavelet, scale)/sampling_period`` to determine what physical frequency, ``f``. Here, ``f`` is in hertz when the ``sampling_period`` is given in seconds. wavelet : Wavelet object or name Wavelet to use sampling_period : float Sampling period for the frequencies output (optional). The values computed for ``coefs`` are independent of the choice of ``sampling_period`` (i.e. ``scales`` is not scaled by the sampling period). method : {'conv', 'fft'}, optional The method used to compute the CWT. Can be any of: - ``conv`` uses ``numpy.convolve``. - ``fft`` uses frequency domain convolution. - ``auto`` uses automatic selection based on an estimate of the computational complexity at each scale. The ``conv`` method complexity is ``O(len(scale) * len(data))``. The ``fft`` method is ``O(N * log2(N))`` with ``N = len(scale) + len(data) - 1``. It is well suited for large size signals but slightly slower than ``conv`` on small ones. precision: int, optional Length of wavelet (2 ** precision) used to compute the CWT. Greater will increase resolution, especially for higher scales, but will compute a bit slower. Too low will distort coefficients and their norms, with a zipper-like effect; recommended >= 12. axis: int, optional Axis over which to compute the CWT. If not given, the last axis is used. Returns ------- coefs : array_like Continuous wavelet transform of the input signal for the given scales and wavelet. The first axis of ``coefs`` corresponds to the scales. The remaining axes match the shape of ``data``. frequencies : array_like If the unit of sampling period are seconds and given, than frequencies are in hertz. Otherwise, a sampling period of 1 is assumed. Notes ----- Size of coefficients arrays depends on the length of the input array and the length of given scales. 
Examples -------- >>> import pywt >>> import numpy as np >>> import matplotlib.pyplot as plt >>> x = np.arange(512) >>> y = np.sin(2*np.pi*x/32) >>> coef, freqs=pywt.cwt(y,np.arange(1,129),'gaus1') >>> plt.matshow(coef) # doctest: +SKIP >>> plt.show() # doctest: +SKIP ---------- >>> import pywt >>> import numpy as np >>> import matplotlib.pyplot as plt >>> t = np.linspace(-1, 1, 200, endpoint=False) >>> sig = np.cos(2 * np.pi * 7 * t) + np.real(np.exp(-7*(t-0.4)**2)*np.exp(1j*2*np.pi*2*(t-0.4))) >>> widths = np.arange(1, 31) >>> cwtmatr, freqs = pywt.cwt(sig, widths, 'mexh') >>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto', ... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) # doctest: +SKIP >>> plt.show() # doctest: +SKIP """""" # accept array_like input; make a copy to ensure a contiguous array dt = _check_dtype(data) data = np.asarray(data, dtype=dt) dt_cplx = np.result_type(dt, np.complex64) if not isinstance(wavelet, (ContinuousWavelet, Wavelet)): wavelet = DiscreteContinuousWavelet(wavelet) if np.isscalar(scales): scales = np.array([scales]) if not np.isscalar(axis): raise ValueError(""axis must be a scalar."") dt_out = dt_cplx if wavelet.complex_cwt else dt out = np.empty((np.size(scales),) + data.shape, dtype=dt_out) int_psi, x = integrate_wavelet(wavelet, precision=precision) int_psi = np.conj(int_psi) if wavelet.complex_cwt else int_psi # convert int_psi, x to the same precision as the data dt_psi = dt_cplx if int_psi.dtype.kind == 'c' else dt int_psi = np.asarray(int_psi, dtype=dt_psi) x = np.asarray(x, dtype=data.real.dtype) if method == 'fft': size_scale0 = -1 fft_data = None elif not method == 'conv': raise ValueError(""method must be 'conv' or 'fft'"") if data.ndim > 1: # move axis to be transformed last (so it is contiguous) data = data.swapaxes(-1, axis) # reshape to (n_batch, data.shape[-1]) data_shape_pre = data.shape data = data.reshape((-1, data.shape[-1])) for i, scale in enumerate(scales): step = x[1] - x[0] j = np.arange(scale * (x[-1] - x[0]) + 1) / (scale * step) j = j.astype(int) # floor if j[-1] >= int_psi.size: j = np.extract(j < int_psi.size, j) int_psi_scale = int_psi[j][::-1] if method == 'conv': if data.ndim == 1: conv = np.convolve(data, int_psi_scale) else: # batch convolution via loop conv_shape = list(data.shape) conv_shape[-1] += int_psi_scale.size - 1 conv_shape = tuple(conv_shape) conv = np.empty(conv_shape, dtype=dt_out) for n in range(data.shape[0]): conv[n, :] = np.convolve(data[n], int_psi_scale) else: # The padding is selected for: # - optimal FFT complexity # - to be larger than the two signals length to avoid circular # convolution size_scale = next_fast_len( data.shape[-1] + int_psi_scale.size - 1 ) if size_scale != size_scale0: # Must recompute fft_data when the padding size changes. fft_data = fftmodule.fft(data, size_scale, axis=-1) size_scale0 = size_scale fft_wav = fftmodule.fft(int_psi_scale, size_scale, axis=-1) conv = fftmodule.ifft(fft_wav * fft_data, axis=-1) conv = conv[..., :data.shape[-1] + int_psi_scale.size - 1] coef = - np.sqrt(scale) * np.diff(conv, axis=-1) if out.dtype.kind != 'c': coef = coef.real # transform axis is always -1 due to the data reshape above d = (coef.shape[-1] - data.shape[-1]) / 2. if d > 0: coef = coef[..., floor(d):-ceil(d)] elif d < 0: raise ValueError( ""Selected scale of {} too small."".format(scale)) if data.ndim > 1: # restore original data shape and axis position coef = coef.reshape(data_shape_pre) coef = coef.swapaxes(axis, -1) out[i, ...] 
= coef frequencies = scale2frequency(wavelet, scales, precision) if np.isscalar(frequencies): frequencies = np.array([frequencies]) frequencies /= sampling_period return out, frequencies " 30802,"def get_company_info_command(client, args): res = client.get_company_info() md = tableToMarkdown('SpaceX Company Info:', res) command_results = CommandResults( outputs_prefix='SpaceX.Company.Info', outputs_key_field='name', outputs=res, readable_output=md ) return_results(command_results) ","def get_company_info_command(client): res = client.get_company_info() md = tableToMarkdown('SpaceX Company Info:', res) command_results = CommandResults( outputs_prefix='SpaceX.Company.Info', outputs_key_field='name', outputs=res, readable_output=md ) return_results(command_results) " 27016,"def _move_dangling_data_to_new_table( session, source_table: ""Table"", source_query: ""Query"", target_table_name: str ): from sqlalchemy import column, select, table from sqlalchemy.sql.selectable import Join bind = session.get_bind() dialect_name = bind.dialect.name # First: Create moved rows from new table if dialect_name == ""mssql"": cte = source_query.cte(""source"") moved_data_tbl = table(target_table_name, *(column(c.name) for c in cte.columns)) ins = moved_data_tbl.insert().from_select(list(cte.columns), select([cte])) stmt = ins.compile(bind=session.get_bind()) cte_sql = stmt.ctes[cte] session.execute(f""WITH {cte_sql} SELECT source.* INTO {target_table_name} FROM source"") elif dialect_name == ""mysql"": # MySQL when replcation is turned needs this split in to two queries, so just do it for all MySQL # ERROR 1786 (HY000): Statement violates GTID consistency: CREATE TABLE ... SELECT. session.execute(f""CREATE TABLE {target_table_name} LIKE {source_table.name}"") session.execute( f""INSERT INTO {target_table_name} {source_query.selectable.compile(bind=session.get_bind())}"" ) else: # Postgres and SQLite both support the same ""CREATE TABLE a AS SELECT ..."" syntax session.execute( f""CREATE TABLE {target_table_name} AS {source_query.selectable.compile(bind=session.get_bind())}"" ) # Second: Now delete rows we've moved try: clause = source_query.whereclause except AttributeError: clause = source_query._whereclause if dialect_name == ""sqlite"": subq = source_query.selectable.with_only_columns([text(f'{source_table}.ROWID')]) delete = source_table.delete().where(column('ROWID').in_(subq)) elif dialect_name in (""mysql"", ""mssql""): # This is not foolproof! But it works for the limited queries (with no params) that we use here stmt = source_query.selectable def _from_name(from_) -> str: if isinstance(from_, Join): return str(from_.compile(bind=bind)) return str(from_) delete = ( f""DELETE {source_table} FROM { ', '.join(_from_name(tbl) for tbl in stmt.froms) }"" f"" WHERE {clause.compile(bind=bind)}"" ) else: for frm in source_query.selectable.froms: if hasattr(frm, 'onclause'): # Table, or JOIN? 
clause &= frm.onclause delete = source_table.delete(clause) session.execute(delete) ","def _move_dangling_data_to_new_table( session, source_table: ""Table"", source_query: ""Query"", target_table_name: str ): from sqlalchemy import column, select, table from sqlalchemy.sql.selectable import Join bind = session.get_bind() dialect_name = bind.dialect.name # First: Create moved rows from new table if dialect_name == ""mssql"": cte = source_query.cte(""source"") moved_data_tbl = table(target_table_name, *(column(c.name) for c in cte.columns)) ins = moved_data_tbl.insert().from_select(list(cte.columns), select([cte])) stmt = ins.compile(bind=session.get_bind()) cte_sql = stmt.ctes[cte] session.execute(f""WITH {cte_sql} SELECT source.* INTO {target_table_name} FROM source"") elif dialect_name == ""mysql"": # MySQL with replication needs this split in to two queries, so just do it for all MySQL # ERROR 1786 (HY000): Statement violates GTID consistency: CREATE TABLE ... SELECT. session.execute(f""CREATE TABLE {target_table_name} LIKE {source_table.name}"") session.execute( f""INSERT INTO {target_table_name} {source_query.selectable.compile(bind=session.get_bind())}"" ) else: # Postgres and SQLite both support the same ""CREATE TABLE a AS SELECT ..."" syntax session.execute( f""CREATE TABLE {target_table_name} AS {source_query.selectable.compile(bind=session.get_bind())}"" ) # Second: Now delete rows we've moved try: clause = source_query.whereclause except AttributeError: clause = source_query._whereclause if dialect_name == ""sqlite"": subq = source_query.selectable.with_only_columns([text(f'{source_table}.ROWID')]) delete = source_table.delete().where(column('ROWID').in_(subq)) elif dialect_name in (""mysql"", ""mssql""): # This is not foolproof! But it works for the limited queries (with no params) that we use here stmt = source_query.selectable def _from_name(from_) -> str: if isinstance(from_, Join): return str(from_.compile(bind=bind)) return str(from_) delete = ( f""DELETE {source_table} FROM { ', '.join(_from_name(tbl) for tbl in stmt.froms) }"" f"" WHERE {clause.compile(bind=bind)}"" ) else: for frm in source_query.selectable.froms: if hasattr(frm, 'onclause'): # Table, or JOIN? clause &= frm.onclause delete = source_table.delete(clause) session.execute(delete) " 35742,"def rotate_image_tensor( img: torch.Tensor, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST, expand: bool = False, fill: Optional[List[float]] = None, center: Optional[List[float]] = None, ) -> torch.Tensor: center_f = [0.0, 0.0] if center is not None: if expand: warnings.warn(""If provided center argument is ignored if expand is True"") else: _, height, width = get_dimensions_image_tensor(img) # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center. center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])] # due to current incoherence of rotation angle direction between affine and rotate implementations # we need to set -angle. 
matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0]) return _FT.rotate(img, matrix, interpolation=interpolation.value, expand=expand, fill=fill) ","def rotate_image_tensor( img: torch.Tensor, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST, expand: bool = False, fill: Optional[List[float]] = None, center: Optional[List[float]] = None, ) -> torch.Tensor: center_f = [0.0, 0.0] if center is not None: if expand: warnings.warn(""The provided center argument is ignored if expand is True"") else: _, height, width = get_dimensions_image_tensor(img) # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center. center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])] # due to current incoherence of rotation angle direction between affine and rotate implementations # we need to set -angle. matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0]) return _FT.rotate(img, matrix, interpolation=interpolation.value, expand=expand, fill=fill) " 40413,"def run(args: argparse.ArgumentParser) -> None: print(""BENCHMARK STARTS"") for dataset_name in args.datasets: print(""Dataset: "", dataset_name) if args.datasets_root == 'data': root = osp.join(osp.dirname(osp.realpath(__file__)), '../..', 'data', dataset_name.partition(""-"")[2]) else: root = args.datasets_root if dataset_name == 'ogbn-mag': transform = T.ToUndirected(merge=True) dataset = OGB_MAG(root=root, transform=transform) train_idx = ('paper', dataset[0]['paper'].train_mask) valid_idx = ('paper', dataset[0]['paper'].val_mask) neighbour_sizes = args.hetero_neighbour_sizes else: dataset = PygNodePropPredDataset(dataset_name, root) split_idx = dataset.get_idx_split() train_idx = split_idx['train'] valid_idx = split_idx['valid'] neighbour_sizes = args.homo_neighbour_sizes data = dataset[0].to(args.device) print('Train sampling') for sizes in neighbour_sizes: print(f'Sizes={sizes}') for batch_size in args.batch_sizes: train_loader = NeighborLoader(data, num_neighbors=sizes, input_nodes=train_idx, batch_size=batch_size, shuffle=True, num_workers=args.num_workers) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in train_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') print('Validation sampling') for batch_size in args.eval_batch_sizes: val_loader = NeighborLoader(data, num_neighbors=[-1], input_nodes=valid_idx, batch_size=batch_size, shuffle=False, num_workers=args.num_workers) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in val_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') ","def run(args: argparse.ArgumentParser) -> None: print(""BENCHMARK STARTS"") for dataset_name in args.datasets: print(""Dataset: "", dataset_name) if args.datasets_root == 'data': root = osp.join(osp.dirname(osp.realpath(__file__)), '../..', 'data', dataset_name.partition(""-"")[2]) else: root = args.datasets_root if dataset_name == 'ogbn-mag': transform = T.ToUndirected(merge=True) dataset = OGB_MAG(root=root, transform=transform) train_idx = ('paper', 
dataset[0]['paper'].train_mask) valid_idx = ('paper', dataset[0]['paper'].val_mask) neighbor_sizes = args.hetero_neighbor_sizes else: dataset = PygNodePropPredDataset(dataset_name, root) split_idx = dataset.get_idx_split() train_idx = split_idx['train'] valid_idx = split_idx['valid'] neighbour_sizes = args.homo_neighbour_sizes data = dataset[0].to(args.device) print('Train sampling') for sizes in neighbour_sizes: print(f'Sizes={sizes}') for batch_size in args.batch_sizes: train_loader = NeighborLoader(data, num_neighbors=sizes, input_nodes=train_idx, batch_size=batch_size, shuffle=True, num_workers=args.num_workers) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in train_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') print('Validation sampling') for batch_size in args.eval_batch_sizes: val_loader = NeighborLoader(data, num_neighbors=[-1], input_nodes=valid_idx, batch_size=batch_size, shuffle=False, num_workers=args.num_workers) start = default_timer() iter = 0 times = [] for run in range(args.runs): start = default_timer() for batch in val_loader: iter = iter + 1 stop = default_timer() times.append(round(stop - start, 3)) average_time = round(sum(times) / args.runs, 3) print(f'Batch size={batch_size} iterations={iter} ' + f'times={times} average_time={average_time}') " 25050,"def _safe_infer_call_result( node: nodes.AsyncFunctionDef | nodes.FunctionDef, caller: nodes.AsyncFunctionDef | nodes.FunctionDef, context: Any | None = None, ) -> Any: """"""Safely infer the return value of a function. Returns None if inference failed or if there is some ambiguity (more than one node has been inferred). Otherwise, returns inferred value. """""" try: inferit = node.infer_call_result(caller, context=context) value = next(inferit) except astroid.InferenceError: return None # inference failed except StopIteration: return None # no values inferred try: next(inferit) return None # there is ambiguity on the inferred node except astroid.InferenceError: return None # there is some kind of ambiguity except StopIteration: return value ","def _safe_infer_call_result( node: nodes.AsyncFunctionDef | nodes.FunctionDef, caller: nodes.AsyncFunctionDef | nodes.FunctionDef, context: InferenceContext | None = None, ) -> Any: """"""Safely infer the return value of a function. Returns None if inference failed or if there is some ambiguity (more than one node has been inferred). Otherwise, returns inferred value. """""" try: inferit = node.infer_call_result(caller, context=context) value = next(inferit) except astroid.InferenceError: return None # inference failed except StopIteration: return None # no values inferred try: next(inferit) return None # there is ambiguity on the inferred node except astroid.InferenceError: return None # there is some kind of ambiguity except StopIteration: return value " 3329,"def get_performance_facets( query, params, orderby=None, aggregate_column=""duration"", aggregate_function=""avg"", limit=20, referrer=None, ): """""" High-level API for getting 'facet map' results for performance data Performance facets are high frequency tags and the aggregate duration of their most frequent values query (str) Filter query string to create conditions from. 
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment limit (int) The number of records to fetch. referrer (str|None) A referrer string to help locate the origin of this query. Returns Sequence[FacetResult] """""" with sentry_sdk.start_span( op=""discover.discover"", description=""facets.filter_transform"" ) as span: span.set_data(""query"", query) snuba_filter = get_filter(query, params) # Resolve the public aliases into the discover dataset names. snuba_filter, translated_columns = resolve_discover_aliases(snuba_filter) with sentry_sdk.start_span(op=""discover.discover"", description=""facets.frequent_tags""): # Get the most relevant tag keys key_names = raw_query( aggregations=[ [aggregate_function, aggregate_column, ""aggregate""], [""count"", None, ""count""], ], start=snuba_filter.start, end=snuba_filter.end, conditions=snuba_filter.conditions, filter_keys=snuba_filter.filter_keys, orderby=[""-count""], dataset=Dataset.Discover, limit=limit, referrer=""{}.{}"".format(referrer, ""all_transactions""), ) counts = [r[""count""] for r in key_names[""data""]] aggregates = [r[""aggregate""] for r in key_names[""data""]] # Return early to avoid doing more queries with 0 count transactions or aggregates for columns that dont exist if len(counts) != 1 or counts[0] == 0 or aggregates[0] is None: return [] results = [] snuba_filter.conditions.append([aggregate_column, ""IS NOT NULL"", None]) # Aggregate for transaction transaction_aggregate = key_names[""data""][0][""aggregate""] # Dynamically sample so at least 10000 transactions are selected transaction_count = key_names[""data""][0][""count""] sampling_enabled = transaction_count > 50000 # Log growth starting at 50,000 target_sample = 50000 * (math.log(transaction_count, 10) - 3) dynamic_sample_rate = 0 if transaction_count <= 0 else (target_sample / transaction_count) sample_rate = math.min(math.max(dynamic_sample_rate, 0), 1) if sampling_enabled else None frequency_sample_rate = sample_rate if sample_rate else 1 excluded_tags = [ ""tags_key"", ""NOT IN"", [""trace"", ""trace.ctx"", ""trace.span"", ""project"", ""browser"", ""celery_task_id""], ] with sentry_sdk.start_span(op=""discover.discover"", description=""facets.aggregate_tags""): conditions = snuba_filter.conditions aggregate_comparison = transaction_aggregate * 1.01 if transaction_aggregate else 0 having = [excluded_tags] if orderby and orderby in (""sumdelta"", ""-sumdelta"", ""aggregate"", ""-aggregate""): having.append([""aggregate"", "">"", aggregate_comparison]) if orderby is None: orderby = [] else: orderby = [orderby] tag_values = raw_query( selected_columns=[ [ ""sum"", [ ""minus"", [ aggregate_column, str(transaction_aggregate), ], ], ""sumdelta"", ], ], aggregations=[ [aggregate_function, aggregate_column, ""aggregate""], [""count"", None, ""cnt""], ], conditions=conditions, start=snuba_filter.start, end=snuba_filter.end, filter_keys=snuba_filter.filter_keys, orderby=orderby + [""tags_key""], groupby=[""tags_key"", ""tags_value""], having=having, dataset=Dataset.Discover, referrer=""{}.{}"".format(referrer, ""tag_values""), sample=sample_rate, turbo=sample_rate is not None, limitby=[1, ""tags_key""], ) results.extend( [ PerformanceFacetResult( key=r[""tags_key""], value=r[""tags_value""], performance=float(r[""aggregate""]), frequency=float((r[""cnt""] / frequency_sample_rate) / transaction_count), comparison=float(r[""aggregate""] / transaction_aggregate), sumdelta=float(r[""sumdelta""]), ) for r in tag_values[""data""] ] ) return results 
","def get_performance_facets( query, params, orderby=None, aggregate_column=""duration"", aggregate_function=""avg"", limit=20, referrer=None, ): """""" High-level API for getting 'facet map' results for performance data Performance facets are high frequency tags and the aggregate duration of their most frequent values query (str) Filter query string to create conditions from. params (Dict[str, str]) Filtering parameters with start, end, project_id, environment limit (int) The number of records to fetch. referrer (str|None) A referrer string to help locate the origin of this query. Returns Sequence[FacetResult] """""" with sentry_sdk.start_span( op=""discover.discover"", description=""facets.filter_transform"" ) as span: span.set_data(""query"", query) snuba_filter = get_filter(query, params) # Resolve the public aliases into the discover dataset names. snuba_filter, translated_columns = resolve_discover_aliases(snuba_filter) with sentry_sdk.start_span(op=""discover.discover"", description=""facets.frequent_tags""): # Get the most relevant tag keys key_names = raw_query( aggregations=[ [aggregate_function, aggregate_column, ""aggregate""], [""count"", None, ""count""], ], start=snuba_filter.start, end=snuba_filter.end, conditions=snuba_filter.conditions, filter_keys=snuba_filter.filter_keys, orderby=[""-count""], dataset=Dataset.Discover, limit=limit, referrer=""{}.{}"".format(referrer, ""all_transactions""), ) counts = [r[""count""] for r in key_names[""data""]] aggregates = [r[""aggregate""] for r in key_names[""data""]] # Return early to avoid doing more queries with 0 count transactions or aggregates for columns that dont exist if len(counts) != 1 or counts[0] == 0 or aggregates[0] is None: return [] results = [] snuba_filter.conditions.append([aggregate_column, ""IS NOT NULL"", None]) # Aggregate for transaction transaction_aggregate = key_names[""data""][0][""aggregate""] # Dynamically sample so at least 10000 transactions are selected transaction_count = key_names[""data""][0][""count""] sampling_enabled = transaction_count > 50000 # Log growth starting at 50,000 target_sample = 50000 * (math.log(transaction_count, 10) - 3) dynamic_sample_rate = 0 if transaction_count <= 0 else (target_sample / transaction_count) sample_rate = min(max(dynamic_sample_rate, 0), 1) if sampling_enabled else None frequency_sample_rate = sample_rate if sample_rate else 1 excluded_tags = [ ""tags_key"", ""NOT IN"", [""trace"", ""trace.ctx"", ""trace.span"", ""project"", ""browser"", ""celery_task_id""], ] with sentry_sdk.start_span(op=""discover.discover"", description=""facets.aggregate_tags""): conditions = snuba_filter.conditions aggregate_comparison = transaction_aggregate * 1.01 if transaction_aggregate else 0 having = [excluded_tags] if orderby and orderby in (""sumdelta"", ""-sumdelta"", ""aggregate"", ""-aggregate""): having.append([""aggregate"", "">"", aggregate_comparison]) if orderby is None: orderby = [] else: orderby = [orderby] tag_values = raw_query( selected_columns=[ [ ""sum"", [ ""minus"", [ aggregate_column, str(transaction_aggregate), ], ], ""sumdelta"", ], ], aggregations=[ [aggregate_function, aggregate_column, ""aggregate""], [""count"", None, ""cnt""], ], conditions=conditions, start=snuba_filter.start, end=snuba_filter.end, filter_keys=snuba_filter.filter_keys, orderby=orderby + [""tags_key""], groupby=[""tags_key"", ""tags_value""], having=having, dataset=Dataset.Discover, referrer=""{}.{}"".format(referrer, ""tag_values""), sample=sample_rate, turbo=sample_rate is not None, 
limitby=[1, ""tags_key""], ) results.extend( [ PerformanceFacetResult( key=r[""tags_key""], value=r[""tags_value""], performance=float(r[""aggregate""]), frequency=float((r[""cnt""] / frequency_sample_rate) / transaction_count), comparison=float(r[""aggregate""] / transaction_aggregate), sumdelta=float(r[""sumdelta""]), ) for r in tag_values[""data""] ] ) return results " 30636,"def list_devices_command(client: Client, args: dict) -> CommandResults: device_id = argToList(args.get('device_id')) status = argToList(args.get('status')) device_os = argToList(args.get('device_os')) last_contact_time = { 'start': args.get('start_time'), 'end': args.get('end_time') } args.get('last_contact_time') ad_group_id = argToList(args.get('ad_group_id')) policy_id = argToList(args.get('policy_id')) target_priority = argToList(args.get('target_priority')) limit = args.get('limit') sort_field = args.get('sort_field', '') sort_order = args.get('sort_order') contents = [] result = client.devices_list_request(device_id, status, device_os, last_contact_time, ad_group_id, policy_id, target_priority, limit, sort_field, sort_order) devices = result.get('results') if not devices: return 'No devices were found.' for device in devices: contents.append({ 'ID': device.get('id'), 'Name': device.get('name'), 'OS': device.get('os'), 'LastInternalIpAddress': device.get('last_internal_ip_address'), 'LastExternalIpAddress': device.get('last_external_ip_address'), 'LastContactTime': device.get('last_contact_time'), 'LastLocation': device.get('last_location'), 'PolicyName': device.get('policy_name'), 'Quarantined': device.get('quarantined'), 'status': device.get('status'), 'TargetPriority': device.get('target_priority') }) readable_output = tableToMarkdown('Devices list results', contents) results = CommandResults( outputs_prefix='CarbonBlackEEDR.Device', outputs_key_field='id', outputs=devices, readable_output=readable_output, raw_response=result ) return results ","def list_devices_command(client: Client, args: dict) -> CommandResults: device_id = argToList(args.get('device_id')) status = argToList(args.get('status')) device_os = argToList(args.get('device_os')) last_contact_time = { 'start': args.get('start_time'), 'end': args.get('end_time') } args.get('last_contact_time') ad_group_id = argToList(args.get('ad_group_id')) policy_id = argToList(args.get('policy_id')) target_priority = argToList(args.get('target_priority')) limit = args.get('limit') sort_field = args.get('sort_field', '') sort_order = args.get('sort_order') contents = [] result = client.devices_list_request(device_id, status, device_os, last_contact_time, ad_group_id, policy_id, target_priority, limit, sort_field, sort_order) devices = result.get('results', []) if not devices: return 'No devices were found.' 
for device in devices: contents.append({ 'ID': device.get('id'), 'Name': device.get('name'), 'OS': device.get('os'), 'LastInternalIpAddress': device.get('last_internal_ip_address'), 'LastExternalIpAddress': device.get('last_external_ip_address'), 'LastContactTime': device.get('last_contact_time'), 'LastLocation': device.get('last_location'), 'PolicyName': device.get('policy_name'), 'Quarantined': device.get('quarantined'), 'status': device.get('status'), 'TargetPriority': device.get('target_priority') }) readable_output = tableToMarkdown('Devices list results', contents) results = CommandResults( outputs_prefix='CarbonBlackEEDR.Device', outputs_key_field='id', outputs=devices, readable_output=readable_output, raw_response=result ) return results " 30432,"def check_ip_command(ip, days=MAX_AGE, verbose=VERBOSE, threshold=THRESHOLD): ip_list = argToList(ip) entry_list = [] for corrent_ip in ip_list: params = { ""ipAddress"": corrent_ip, ""maxAgeInDays"": days } if verbose: params['verbose'] = ""verbose"" analysis = http_request(""GET"", url_suffix=CHECK_CMD, params=params).get(""data"") entry_list.append(analysis_to_entry(analysis, verbose=verbose, threshold=threshold)) return entry_list ","def check_ip_command(ip, days=MAX_AGE, verbose=VERBOSE, threshold=THRESHOLD): ip_list = argToList(ip) entry_list = [] for current_ip in ip_list: params = { ""ipAddress"": corrent_ip, ""maxAgeInDays"": days } if verbose: params['verbose'] = ""verbose"" analysis = http_request(""GET"", url_suffix=CHECK_CMD, params=params).get(""data"") entry_list.append(analysis_to_entry(analysis, verbose=verbose, threshold=threshold)) return entry_list " 39920,"def oracle_fallback_gas_price_strategy(web3: Web3, transaction_params: TxParams = None) -> Wei: oracles = (EtherchainGasPriceOracle, UpvestGasPriceOracle) for gas_price_oracle_class in oracles: try: gas_strategy = gas_price_oracle_class.construct_gas_strategy() gas_price = gas_strategy(web3, transaction_params) except EtherchainGasPriceOracle.OracleError: continue else: return gas_price else: # Worst-case scenario, we get the price from the ETH node itself return rpc_gas_price_strategy(web3, transaction_params) ","def oracle_fallback_gas_price_strategy(web3: Web3, transaction_params: TxParams = None) -> Wei: oracles = (EtherchainGasPriceOracle, UpvestGasPriceOracle) for gas_price_oracle_class in oracles: try: gas_strategy = gas_price_oracle_class.construct_gas_strategy() gas_price = gas_strategy(web3, transaction_params) except Oracle.OracleError: continue else: return gas_price else: # Worst-case scenario, we get the price from the ETH node itself return rpc_gas_price_strategy(web3, transaction_params) " 54948,"def decompose_hamiltonian(H): """"""Decomposes a hamiltonian into tensor product of pauli matrices Args: H (matrix): dimensions 2**n Yields: list: coefficients for every tensor product of pauli matrix combinations list: tensor product of pauli matrix combinations """""" N = int(np.log2(len(H))) if len(H) - 2 ** N != 0: raise ValueError(""Hamiltonian should be in the form (n^2 x n^2), for any n>=1"") # paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ] obs = [] coeffs = [] # for term in itertools.product(paulis, repeat=N): matrices = [i._matrix() for i in term] coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / (2 ** N) # if not np.allclose(coeff, 0): coeffs.append(coeff) # if not all(t is qml.Identity for t in term): obs.append( functools.reduce( operator.matmul, [t(i) for i, t in enumerate(term) if t is not qml.Identity] ) ) else: 
obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # return coeffs, obs ","def decompose_hamiltonian(H): """"""Decomposes a hamiltonian into tensor product of pauli matrices Args: H (matrix): dimensions 2**n Returns: tuple[list[float], list[~.Observable]]: Returns a list of tensor products of PennyLane Pauli observables, as well as the corresponding coefficients for each tensor product. """""" N = int(np.log2(len(H))) if len(H) - 2 ** N != 0: raise ValueError(""Hamiltonian should be in the form (n^2 x n^2), for any n>=1"") # paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ] obs = [] coeffs = [] # for term in itertools.product(paulis, repeat=N): matrices = [i._matrix() for i in term] coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / (2 ** N) # if not np.allclose(coeff, 0): coeffs.append(coeff) # if not all(t is qml.Identity for t in term): obs.append( functools.reduce( operator.matmul, [t(i) for i, t in enumerate(term) if t is not qml.Identity] ) ) else: obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # return coeffs, obs " 50621,"def _get_localzone(_root='/'): """"""Tries to find the local timezone configuration. This method prefers finding the timezone name and passing that to pytz, over passing in the localtime file, as in the later case the zoneinfo name is unknown. The parameter _root makes the function look for files like /etc/localtime beneath the _root directory. This is primarily used by the tests. In normal usage you call the function without parameters. """""" tzenv = os.environ.get('TZ') if tzenv: return _tz_from_env(tzenv) # This is actually a pretty reliable way to test for the local time # zone on operating systems like OS X. On OS X especially this is the # only one that actually works. try: link_dst = os.readlink('/etc/localtime') except OSError: pass else: pos = link_dst.find('/zoneinfo/') if pos >= 0: zone_name = link_dst[pos + 10:] try: return pytz.timezone(zone_name) except pytz.UnknownTimeZoneError: pass # If we are on OS X now we are pretty sure that the rest of the # code will fail and just fall through until it hits the reading # of /etc/localtime and using it without name. At this point we # can invoke systemconfig which internally invokes ICU. ICU itself # does the same thing we do (readlink + compare file contents) but # since it knows where the zone files are that should be a bit # better than reimplementing the logic here. if sys.platform == 'darwin': try: c = subprocess.Popen(['systemsetup', '-gettimezone'], stdout=subprocess.PIPE) sys_result = c.communicate()[0] c.wait() tz_match = _systemconfig_tz.search(sys_result) if tz_match is not None: zone_name = tz_match.group(1) try: return pytz.timezone(zone_name) except pytz.UnknownTimeZoneError: pass # iOS doesn't come with systemsetup except FileNotFoundError: pass # Now look for distribution specific configuration files # that contain the timezone name. tzpath = os.path.join(_root, 'etc/timezone') if os.path.exists(tzpath): with open(tzpath, 'rb') as tzfile: data = tzfile.read() # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file. 
# That's a misconfiguration, but we need to handle it gracefully: if data[:5] != b'TZif2': etctz = data.strip().decode() # Get rid of host definitions and comments: if ' ' in etctz: etctz, dummy = etctz.split(' ', 1) if '#' in etctz: etctz, dummy = etctz.split('#', 1) return pytz.timezone(etctz.replace(' ', '_')) # CentOS has a ZONE setting in /etc/sysconfig/clock, # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and # Gentoo has a TIMEZONE setting in /etc/conf.d/clock # We look through these files for a timezone: timezone_re = re.compile(r'\s*(TIME)?ZONE\s*=\s*""(?P.+)""') for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'): tzpath = os.path.join(_root, filename) if not os.path.exists(tzpath): continue with open(tzpath) as tzfile: for line in tzfile: match = timezone_re.match(line) if match is not None: # We found a timezone etctz = match.group(""etctz"") return pytz.timezone(etctz.replace(' ', '_')) # No explicit setting existed. Use localtime for filename in ('etc/localtime', 'usr/local/etc/localtime'): tzpath = os.path.join(_root, filename) if not os.path.exists(tzpath): continue with open(tzpath, 'rb') as tzfile: return pytz.tzfile.build_tzinfo('local', tzfile) raise pytz.UnknownTimeZoneError('Can not find any timezone configuration') ","def _get_localzone(_root='/'): """"""Tries to find the local timezone configuration. This method prefers finding the timezone name and passing that to pytz, over passing in the localtime file, as in the later case the zoneinfo name is unknown. The parameter _root makes the function look for files like /etc/localtime beneath the _root directory. This is primarily used by the tests. In normal usage you call the function without parameters. """""" tzenv = os.environ.get('TZ') if tzenv: return _tz_from_env(tzenv) # This is actually a pretty reliable way to test for the local time # zone on operating systems like OS X. On OS X especially this is the # only one that actually works. try: link_dst = os.readlink('/etc/localtime') except OSError: pass else: pos = link_dst.find('/zoneinfo/') if pos >= 0: zone_name = link_dst[pos + 10:] try: return pytz.timezone(zone_name) except pytz.UnknownTimeZoneError: pass # If we are on OS X now we are pretty sure that the rest of the # code will fail and just fall through until it hits the reading # of /etc/localtime and using it without name. At this point we # can invoke systemconfig which internally invokes ICU. ICU itself # does the same thing we do (readlink + compare file contents) but # since it knows where the zone files are that should be a bit # better than reimplementing the logic here. if sys.platform == 'darwin': try: c = subprocess.Popen(['systemsetup', '-gettimezone'], stdout=subprocess.PIPE) # iOS doesn't come with systemsetup except FileNotFoundError: pass else: sys_result = c.communicate()[0] c.wait() tz_match = _systemconfig_tz.search(sys_result) if tz_match is not None: zone_name = tz_match.group(1) try: return pytz.timezone(zone_name) except pytz.UnknownTimeZoneError: pass # Now look for distribution specific configuration files # that contain the timezone name. tzpath = os.path.join(_root, 'etc/timezone') if os.path.exists(tzpath): with open(tzpath, 'rb') as tzfile: data = tzfile.read() # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file. 
# That's a misconfiguration, but we need to handle it gracefully: if data[:5] != b'TZif2': etctz = data.strip().decode() # Get rid of host definitions and comments: if ' ' in etctz: etctz, dummy = etctz.split(' ', 1) if '#' in etctz: etctz, dummy = etctz.split('#', 1) return pytz.timezone(etctz.replace(' ', '_')) # CentOS has a ZONE setting in /etc/sysconfig/clock, # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and # Gentoo has a TIMEZONE setting in /etc/conf.d/clock # We look through these files for a timezone: timezone_re = re.compile(r'\s*(TIME)?ZONE\s*=\s*""(?P.+)""') for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'): tzpath = os.path.join(_root, filename) if not os.path.exists(tzpath): continue with open(tzpath) as tzfile: for line in tzfile: match = timezone_re.match(line) if match is not None: # We found a timezone etctz = match.group(""etctz"") return pytz.timezone(etctz.replace(' ', '_')) # No explicit setting existed. Use localtime for filename in ('etc/localtime', 'usr/local/etc/localtime'): tzpath = os.path.join(_root, filename) if not os.path.exists(tzpath): continue with open(tzpath, 'rb') as tzfile: return pytz.tzfile.build_tzinfo('local', tzfile) raise pytz.UnknownTimeZoneError('Can not find any timezone configuration') " 57747,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" demisto_commands = {'azure-waf-policy-get': policy_get_command, 'azure-waf-policy-list-all-in-subscription': policy_get_list_by_subscription_command, 'azure-waf-policy-upsert': policy_upsert_command, 'azure-waf-policy-delete': policy_delete_command, 'azure-waf-auth-start': start_auth, 'azure-waf-auth-complete': complete_auth, 'azure-waf-test': test_connection, } params = demisto.params() command = demisto.command() args = demisto.args() client = AzureWAFClient( app_id=params.get('app_id', ''), subscription_id=params.get('subscription_id', ''), resource_group_name=params.get('resource_group_name', ''), verify=not params.get('insecure', False), proxy=params.get('proxy', False), ) demisto.debug(f'Command being called is {demisto.command()}') try: if command == 'test-module': raise ValueError(""Please run `!azure-waf-auth-start` and `!azure-waf-auth-complete` to log in."" "" For more details press the (?) 
button."") if command == 'azure-waf-test': # return_results(test_connection(client, **params)) pass else: return_results(demisto_commands[command](client, **args)) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions """""" demisto_commands = {'azure-waf-policy-get': policy_get_command, 'azure-waf-policy-list-all-in-subscription': policy_get_list_by_subscription_command, 'azure-waf-policy-upsert': policy_upsert_command, 'azure-waf-policy-delete': policy_delete_command, 'azure-waf-auth-start': start_auth, 'azure-waf-auth-complete': complete_auth, 'azure-waf-test': test_connection, } params = demisto.params() command = demisto.command() args = demisto.args() client = AzureWAFClient( app_id=params.get('app_id', ''), subscription_id=params.get('subscription_id', ''), resource_group_name=params.get('resource_group_name', ''), verify=not params.get('insecure', False), proxy=params.get('proxy', False), ) demisto.debug(f'Command being called is {demisto.command()}') try: if command == 'test-module': raise ValueError(""Please run `!azure-waf-auth-start` and `!azure-waf-auth-complete` to log in."" "" For more details press the (?) button."") if command == 'azure-waf-test': # return_results(test_connection(client, **params)) pass else: return_results(demisto_commands[command](client, **args)) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 32004,"def get_new_indicators(client: MandiantClient, last_run: str, indicator_type: str, limit: int) -> List: """""" Get new indicators list. Args: client (MandiantClient): client last_run (str): last run as free text or date format indicator_type (str): the desired type to fetch limit (int): number of indicator to fetch Returns: List: new indicators """""" start_date = dateparser.parse(last_run, settings={'TIMEZONE': 'UTC'}) if indicator_type == 'Indicators': # for indicator type the earliest time to fetch is 90 days ago earliest_fetch = dateparser.parse('90 days ago', settings={'TIMEZONE': 'UTC'}) start_date = max(earliest_fetch, start_date) params = {'start_epoch': int(start_date.timestamp()), 'limit': limit} if indicator_type == 'Indicators' else {} res = client._http_request(method=""GET"", url_suffix=f'/v4/{MAP_TYPE_TO_URL[indicator_type]}', timeout=client._timeout, params=params, ok_codes=[200]) new_indicators_list = res.get(MAP_TYPE_TO_RESPONSE[indicator_type], []) date_key = 'last_seen' if indicator_type == 'Indicators' else 'last_updated' new_indicators_list.sort(key=lambda x: dateparser.parse(x.get(date_key)), reverse=True) # new to old new_indicators_list = list(filter(lambda x: dateparser.parse(x[date_key]).timestamp() > start_date.timestamp(), new_indicators_list)) return new_indicators_list ","def get_new_indicators(client: MandiantClient, last_run: str, indicator_type: str, limit: int) -> List: """""" Get new indicators list. 
Args: client (MandiantClient): client last_run (str): last run as free text or date format indicator_type (str): the desired type to fetch limit (int): number of indicator to fetch Returns: List: new indicators """""" start_date = arg_to_datetime(last_run) if indicator_type == 'Indicators': # for indicator type the earliest time to fetch is 90 days ago earliest_fetch = dateparser.parse('90 days ago', settings={'TIMEZONE': 'UTC'}) start_date = max(earliest_fetch, start_date) params = {'start_epoch': int(start_date.timestamp()), 'limit': limit} if indicator_type == 'Indicators' else {} res = client._http_request(method=""GET"", url_suffix=f'/v4/{MAP_TYPE_TO_URL[indicator_type]}', timeout=client._timeout, params=params, ok_codes=[200]) new_indicators_list = res.get(MAP_TYPE_TO_RESPONSE[indicator_type], []) date_key = 'last_seen' if indicator_type == 'Indicators' else 'last_updated' new_indicators_list.sort(key=lambda x: dateparser.parse(x.get(date_key)), reverse=True) # new to old new_indicators_list = list(filter(lambda x: dateparser.parse(x[date_key]).timestamp() > start_date.timestamp(), new_indicators_list)) return new_indicators_list " 13167,"def _transform_types( data: Dict[str, Any], custom_types: dict, transform_files: Optional[bool] = True ) -> Tuple[dict, dict]: """"""Copy the data dict with attributes that have custom types and transform them before being sent to the server. If ``transform_files`` is ``True`` (default), also populates the ``files`` dict for FileAttribute types with tuples to prepare fields for requests' MultipartEncoder: https://toolbelt.readthedocs.io/en/latest/user.html#multipart-form-data-encoder Returns: A tuple of the transformed data dict and files dict"""""" # Duplicate data to avoid messing with what the user sent us data = data.copy() files = {} for attr_name, type_cls in custom_types.items(): if attr_name not in data.keys(): continue type_obj = type_cls(data[attr_name]) # if the type if FileAttribute we need to pass the data as file if transform_files and isinstance(type_obj, types.FileAttribute): key = type_obj.get_file_name(attr_name) files[attr_name] = (key, data.pop(attr_name)) else: data[attr_name] = type_obj.get_for_api() return data, files ","def _transform_types( data: Dict[str, Any], custom_types: dict, transform_files: Optional[bool] = True ) -> Tuple[dict, dict]: """"""Copy the data dict with attributes that have custom types and transform them before being sent to the server. 
If ``transform_files`` is ``True`` (default), also populates the ``files`` dict for FileAttribute types with tuples to prepare fields for requests' MultipartEncoder: https://toolbelt.readthedocs.io/en/latest/user.html#multipart-form-data-encoder Returns: A tuple of the transformed data dict and files dict"""""" # Duplicate data to avoid messing with what the user sent us data = data.copy() files = {} for attr_name, type_cls in custom_types.items(): if attr_name not in data: continue type_obj = type_cls(data[attr_name]) # if the type if FileAttribute we need to pass the data as file if transform_files and isinstance(type_obj, types.FileAttribute): key = type_obj.get_file_name(attr_name) files[attr_name] = (key, data.pop(attr_name)) else: data[attr_name] = type_obj.get_for_api() return data, files " 17433,"def _get_boolean_with_default(option, default): global_choice = OPTIONS[option] if global_choice == ""default"": return default elif global_choice in [True, False]: return global_choice else: raise ValueError( f""The global option f{option} must be one of True, False or 'default'."" ) ","def _get_boolean_with_default(option, default): global_choice = OPTIONS[option] if global_choice == ""default"": return default elif global_choice in [True, False]: return global_choice else: raise ValueError( f""The global option {option} must be one of True, False or 'default'."" ) " 45616,"def __gather_clearcoat_extension(blender_material, export_settings): clearcoat_enabled = False has_clearcoat_texture = False has_clearcoat_roughness_texture = False clearcoat_extension = {} clearcoat_roughness_slots = () clearcoat_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, 'Clearcoat') clearcoat_roughness_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, 'Clearcoat Roughness') clearcoat_normal_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, 'Clearcoat Normal') if isinstance(clearcoat_socket, bpy.types.NodeSocket) and not clearcoat_socket.is_linked: clearcoat_extension['clearcoatFactor'] = clearcoat_socket.default_value clearcoat_enabled = clearcoat_extension['clearcoatFactor'] > 0 elif __has_image_node_from_socket(clearcoat_socket): clearcoat_extension['clearcoatFactor'] = 1 has_clearcoat_texture = True clearcoat_enabled = True if not clearcoat_enabled: return None if isinstance(clearcoat_roughness_socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.is_linked: clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.default_value elif __has_image_node_from_socket(clearcoat_roughness_socket): clearcoat_extension['clearcoatRoughnessFactor'] = 1 has_clearcoat_roughness_texture = True # Pack clearcoat (R) and clearcoatRoughness (G) channels. 
if has_clearcoat_texture and has_clearcoat_roughness_texture: clearcoat_roughness_slots = (clearcoat_socket, clearcoat_roughness_socket,) elif has_clearcoat_texture: clearcoat_roughness_slots = (clearcoat_socket,) elif has_clearcoat_roughness_texture: clearcoat_roughness_slots = (clearcoat_roughness_socket,) if len(clearcoat_roughness_slots) > 0: combined_texture = gltf2_blender_gather_texture_info.gather_texture_info(clearcoat_roughness_slots, export_settings) if has_clearcoat_texture: clearcoat_extension['clearcoatTexture'] = combined_texture if has_clearcoat_roughness_texture: clearcoat_extension['clearcoatRoughnessTexture'] = combined_texture if __has_image_node_from_socket(clearcoat_normal_socket): clearcoat_extension['clearcoatNormalTexture'] = gltf2_blender_gather_texture_info.gather_texture_info( (clearcoat_normal_socket,), export_settings ) return Extension('KHR_materials_clearcoat', clearcoat_extension, False) ","def __gather_clearcoat_extension(blender_material, export_settings): clearcoat_enabled = False has_clearcoat_texture = False has_clearcoat_roughness_texture = False clearcoat_extension = {} clearcoat_roughness_slots = () clearcoat_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, 'Clearcoat') clearcoat_roughness_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, 'Clearcoat Roughness') clearcoat_normal_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, 'Clearcoat Normal') if isinstance(clearcoat_socket, bpy.types.NodeSocket) and not clearcoat_socket.is_linked: clearcoat_extension['clearcoatFactor'] = clearcoat_socket.default_value clearcoat_enabled = clearcoat_extension['clearcoatFactor'] > 0 elif __has_image_node_from_socket(clearcoat_socket): clearcoat_extension['clearcoatFactor'] = 1 has_clearcoat_texture = True clearcoat_enabled = True if not clearcoat_enabled: return None if isinstance(clearcoat_roughness_socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.is_linked: clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.default_value elif __has_image_node_from_socket(clearcoat_roughness_socket): clearcoat_extension['clearcoatRoughnessFactor'] = 1 has_clearcoat_roughness_texture = True # Pack clearcoat (R) and clearcoatRoughness (G) channels. if has_clearcoat_texture and has_clearcoat_roughness_texture: clearcoat_roughness_slots = (clearcoat_socket, clearcoat_roughness_socket,) elif has_clearcoat_texture: clearcoat_roughness_slots = (clearcoat_socket,) elif has_clearcoat_roughness_texture: clearcoat_roughness_slots = (clearcoat_roughness_socket,) if len(clearcoat_roughness_slots) > 0: combined_texture = gltf2_blender_gather_texture_info.gather_texture_info(clearcoat_roughness_slots, export_settings) if has_clearcoat_texture: clearcoat_extension['clearcoatTexture'] = combined_texture if has_clearcoat_roughness_texture: clearcoat_extension['clearcoatRoughnessTexture'] = combined_texture if __has_image_node_from_socket(clearcoat_normal_socket): clearcoat_extension['clearcoatNormalTexture'] = gltf2_blender_gather_material_normal_texture_info_class.gather_material_normal_texture_info_class( (clearcoat_normal_socket,), export_settings ) return Extension('KHR_materials_clearcoat', clearcoat_extension, False) " 44040,"def transform_hamiltonian(h, generator, paulix_wires, paulix_sector=None): r""""""Transform a Hamiltonian with a Clifford operator and taper qubits. The Hamiltonian is transformed as :math:`H' = U^{\dagger} H U` where :math:`U` is a Clifford operator. 
The transformed Hamiltonian acts trivially on some qubits which are then replaced with the eigenvalues of their corresponding Pauli/Identity operator. Args: h (Hamiltonian): PennyLane Hamiltonian generator (list[Hamiltonian]): generators expressed as PennyLane Hamiltonians paulix_wires (list[int]): indices of the wires the PauliX operator acts on paulix_sector list([list[int]]): list of eigenvalues of the PauliX operators Returns: (list[tuple[list[int], qml.Hamiltonian]]): paulix sector and its corresponding Hamiltonian **Example** >>> symbols = [""H"", ""H""] >>> geometry = np.array([[0.0, 0.0, -0.69440367], [0.0, 0.0, 0.69440367]], requires_grad=False) >>> mol = qml.hf.Molecule(symbols, geometry) >>> H = qml.hf.generate_hamiltonian(mol)() >>> t1 = qml.Hamiltonian([1.0], [qml.grouping.string_to_pauli_word('ZZII')]) >>> t2 = qml.Hamiltonian([1.0], [qml.grouping.string_to_pauli_word('ZIZI')]) >>> t3 = qml.Hamiltonian([1.0], [qml.grouping.string_to_pauli_word('ZIIZ')]) >>> generator = [t1, t2, t3] >>> paulix_wires = [1, 2, 3] >>> paulix_sector = [[1, -1, -1]] >>> transform_hamiltonian(H, generator, paulix_wires, paulix_sector) [([1, -1, -1], )] """""" u = clifford(generator, paulix_wires) h = _observable_mult(_observable_mult(u, h), u) if paulix_sector is None: paulix_sector = itertools.product([1, -1], repeat=len(paulix_wires)) result = [] wiremap = dict(zip(h.wires, h.wires)) for sector in paulix_sector: val = np.ones(len(h.terms[0])) * complex(1.0) for idx, w in enumerate(paulix_wires): for i in range(len(h.terms[0])): s = qml.grouping.pauli_word_to_string(h.terms[1][i], wire_map=wiremap) if s[w] == ""X"": val[i] *= sector[idx] o = [] for i in range(len(h.terms[0])): s = qml.grouping.pauli_word_to_string(h.terms[1][i], wire_map=wiremap) wires = [x for x in h.wires if x not in paulix_wires] o.append(qml.grouping.string_to_pauli_word("""".join([s[i] for i in wires]))) c = anp.multiply(val, h.terms[0]) c = qml.math.stack(c) result.append((list(sector), _simplify(qml.Hamiltonian(c, o)))) return result ","def transform_hamiltonian(h, generator, paulix_wires, paulix_sector=None): r""""""Transform a Hamiltonian with a Clifford operator and taper qubits. The Hamiltonian is transformed as :math:`H' = U^{\dagger} H U` where :math:`U` is a Clifford operator. The transformed Hamiltonian acts trivially on some qubits which are then replaced with the eigenvalues of their corresponding Pauli/Identity operator. 
Args: h (Hamiltonian): PennyLane Hamiltonian generator (list[Hamiltonian]): generators expressed as PennyLane Hamiltonians paulix_wires (list[int]): indices of the wires the PauliX operator acts on paulix_sector list([list[int]]): list of eigenvalues of the PauliX operators Returns: (list[tuple[list[int], qml.Hamiltonian]]): paulix sector and its corresponding tapered Hamiltonian **Example** >>> symbols = [""H"", ""H""] >>> geometry = np.array([[0.0, 0.0, -0.69440367], [0.0, 0.0, 0.69440367]], requires_grad=False) >>> mol = qml.hf.Molecule(symbols, geometry) >>> H = qml.hf.generate_hamiltonian(mol)() >>> t1 = qml.Hamiltonian([1.0], [qml.grouping.string_to_pauli_word('ZZII')]) >>> t2 = qml.Hamiltonian([1.0], [qml.grouping.string_to_pauli_word('ZIZI')]) >>> t3 = qml.Hamiltonian([1.0], [qml.grouping.string_to_pauli_word('ZIIZ')]) >>> generator = [t1, t2, t3] >>> paulix_wires = [1, 2, 3] >>> paulix_sector = [[1, -1, -1]] >>> transform_hamiltonian(H, generator, paulix_wires, paulix_sector) [([1, -1, -1], )] """""" u = clifford(generator, paulix_wires) h = _observable_mult(_observable_mult(u, h), u) if paulix_sector is None: paulix_sector = itertools.product([1, -1], repeat=len(paulix_wires)) result = [] wiremap = dict(zip(h.wires, h.wires)) for sector in paulix_sector: val = np.ones(len(h.terms[0])) * complex(1.0) for idx, w in enumerate(paulix_wires): for i in range(len(h.terms[0])): s = qml.grouping.pauli_word_to_string(h.terms[1][i], wire_map=wiremap) if s[w] == ""X"": val[i] *= sector[idx] o = [] for i in range(len(h.terms[0])): s = qml.grouping.pauli_word_to_string(h.terms[1][i], wire_map=wiremap) wires = [x for x in h.wires if x not in paulix_wires] o.append(qml.grouping.string_to_pauli_word("""".join([s[i] for i in wires]))) c = anp.multiply(val, h.terms[0]) c = qml.math.stack(c) result.append((list(sector), _simplify(qml.Hamiltonian(c, o)))) return result " 22927,"def run_command(args, expected_exit_code=0, max_output_chars=256, env=None): for a in args: assert isinstance(a, str) # Travis only prints ASCII print(('>> %s' % "" "".join([a for a in args]))) try: output = subprocess.check_output(args, env=env) exitcode = 0 except subprocess.CalledProcessError as e: output = e.output exitcode = e.returncode except Exception as e: output = traceback.format_exc() exitcode = 'test-cli exception' print((Colorizer.cyan("" (exit code %s, expected %s)"" % (exitcode, expected_exit_code)))) print((sanitize(output, max_output_chars))) assert expected_exit_code == exitcode, 'Exit codes don\'t match' return output.rstrip() ","def run_command(args, expected_exit_code=0, max_output_chars=256, env=None): for a in args: pass # Travis only prints ASCII print(('>> %s' % "" "".join([a for a in args]))) try: output = subprocess.check_output(args, env=env) exitcode = 0 except subprocess.CalledProcessError as e: output = e.output exitcode = e.returncode except Exception as e: output = traceback.format_exc() exitcode = 'test-cli exception' print((Colorizer.cyan("" (exit code %s, expected %s)"" % (exitcode, expected_exit_code)))) print((sanitize(output, max_output_chars))) assert expected_exit_code == exitcode, 'Exit codes don\'t match' return output.rstrip() " 49883,"def _compute_wavelet(clearsky_index, dt=None): """""" Compute the wavelet transform on the input clear_sky time series. Uses a top hat wavelet [-1,1,1,-1] shape, based on the difference of successive centered moving averages. Smallest scale (filter size of 2) is a degenerate case that resembles a Haar wavelet. 
Returns one level of approximation coefficient (CAn) and n levels of detail coefficients (CD1, CD2, ..., CDn-1, CDn). Parameters ---------- clearsky_index : numeric or pandas.Series Clear Sky Index time series that will be smoothed. dt : float, default None The time series time delta. By default, is inferred from the clearsky_index. Must be specified for a time series that doesn't include an index. Units of seconds [s]. Returns ------- wavelet: numeric The individual wavelets for the time series. Format follows increasing scale (decreasing frequency): [CD1, CD2, ..., CDn, CAn] tmscales: numeric The timescales associated with the wavelets in seconds [s] References ---------- [1] M. Lave, J. Kleissl and J.S. Stein. A Wavelet-Based Variability Model (WVM) for Solar PV Power Plants. IEEE Transactions on Sustainable Energy, vol. 4, no. 2, pp. 501-509, 2013. [2] Wavelet Variability Model - Matlab Code: https://pvpmc.sandia.gov/applications/wavelet-variability-model/ """""" # Added by Joe Ranalli (@jranalli), Penn State Hazleton, 2019 try: # Assume it's a pandas type vals = clearsky_index.values.flatten() except AttributeError: # Assume it's a numpy type vals = clearsky_index.flatten() if dt is None: raise ValueError(""dt must be specified for numpy type inputs."") else: # flatten() succeeded, thus it's a pandas type, so get its dt try: # Assume it's a time series type index dt = (clearsky_index.index[1] - clearsky_index.index[0]).seconds except AttributeError: # It must just be a numeric index dt = (clearsky_index.index[1] - clearsky_index.index[0]) # Pad the series on both ends in time and place in a dataframe cs_long = np.pad(vals, (len(vals), len(vals)), 'symmetric') cs_long = pd.DataFrame(cs_long) # Compute wavelet time scales min_tmscale = np.ceil(np.log(dt)/np.log(2)) # Minimum wavelet timescale max_tmscale = int(13 - min_tmscale) # maximum wavelet timescale tmscales = np.zeros(max_tmscale) csi_mean = np.zeros([max_tmscale, len(cs_long)]) # Skip averaging for the 0th scale csi_mean[0, :] = cs_long.values.flatten() tmscales[0] = 1 # Loop for all time scales we will consider for i in np.arange(1, max_tmscale): tmscales[i] = 2**i * dt # Wavelet integration time scale intvlen = 2**i # Wavelet integration time series interval # Rolling average, retains only lower frequencies than interval # Produces slightly different end effects than the MATLAB version df = cs_long.rolling(window=intvlen, center=True, min_periods=1).mean() # Fill nan's in both directions df = df.fillna(method='bfill').fillna(method='ffill') # Pop values back out of the dataframe and store csi_mean[i, :] = df.values.flatten() # Shift to account for different indexing in MATLAB moving average csi_mean[i, :] = np.roll(csi_mean[i, :], -1) csi_mean[i, -1] = csi_mean[i, -2] # Calculate detail coefficients by difference between successive averages wavelet_long = np.zeros(csi_mean.shape) for i in np.arange(0, max_tmscale-1): wavelet_long[i, :] = csi_mean[i, :] - csi_mean[i+1, :] wavelet_long[-1, :] = csi_mean[-1, :] # Lowest freq (CAn) # Clip off the padding and just return the original time window wavelet = np.zeros([max_tmscale, len(vals)]) for i in np.arange(0, max_tmscale): wavelet[i, :] = wavelet_long[i, len(vals): 2*len(vals)] return wavelet, tmscales ","def _compute_wavelet(clearsky_index, dt=None): """""" Compute the wavelet transform on the input clear_sky time series. Uses a top hat wavelet [-1,1,1,-1] shape, based on the difference of successive centered moving averages. 
Smallest scale (filter size of 2) is a degenerate case that resembles a Haar wavelet. Returns one level of approximation coefficient (CAn) and n levels of detail coefficients (CD1, CD2, ..., CDn-1, CDn). Parameters ---------- clearsky_index : numeric or pandas.Series Clear Sky Index time series that will be smoothed. dt : float, default None The time series time delta. By default, is inferred from the clearsky_index. Must be specified for a time series that doesn't include an index. Units of seconds [s]. Returns ------- wavelet: numeric The individual wavelets for the time series. Format follows increasing scale (decreasing frequency): [CD1, CD2, ..., CDn, CAn] tmscales: numeric The timescales associated with the wavelets in seconds [s] References ---------- [1] M. Lave, J. Kleissl and J.S. Stein. A Wavelet-Based Variability Model (WVM) for Solar PV Power Plants. IEEE Transactions on Sustainable Energy, vol. 4, no. 2, pp. 501-509, 2013. .. [2] Wavelet Variability Model - Matlab Code: https://pvpmc.sandia.gov/applications/wavelet-variability-model/ """""" # Added by Joe Ranalli (@jranalli), Penn State Hazleton, 2019 try: # Assume it's a pandas type vals = clearsky_index.values.flatten() except AttributeError: # Assume it's a numpy type vals = clearsky_index.flatten() if dt is None: raise ValueError(""dt must be specified for numpy type inputs."") else: # flatten() succeeded, thus it's a pandas type, so get its dt try: # Assume it's a time series type index dt = (clearsky_index.index[1] - clearsky_index.index[0]).seconds except AttributeError: # It must just be a numeric index dt = (clearsky_index.index[1] - clearsky_index.index[0]) # Pad the series on both ends in time and place in a dataframe cs_long = np.pad(vals, (len(vals), len(vals)), 'symmetric') cs_long = pd.DataFrame(cs_long) # Compute wavelet time scales min_tmscale = np.ceil(np.log(dt)/np.log(2)) # Minimum wavelet timescale max_tmscale = int(13 - min_tmscale) # maximum wavelet timescale tmscales = np.zeros(max_tmscale) csi_mean = np.zeros([max_tmscale, len(cs_long)]) # Skip averaging for the 0th scale csi_mean[0, :] = cs_long.values.flatten() tmscales[0] = 1 # Loop for all time scales we will consider for i in np.arange(1, max_tmscale): tmscales[i] = 2**i * dt # Wavelet integration time scale intvlen = 2**i # Wavelet integration time series interval # Rolling average, retains only lower frequencies than interval # Produces slightly different end effects than the MATLAB version df = cs_long.rolling(window=intvlen, center=True, min_periods=1).mean() # Fill nan's in both directions df = df.fillna(method='bfill').fillna(method='ffill') # Pop values back out of the dataframe and store csi_mean[i, :] = df.values.flatten() # Shift to account for different indexing in MATLAB moving average csi_mean[i, :] = np.roll(csi_mean[i, :], -1) csi_mean[i, -1] = csi_mean[i, -2] # Calculate detail coefficients by difference between successive averages wavelet_long = np.zeros(csi_mean.shape) for i in np.arange(0, max_tmscale-1): wavelet_long[i, :] = csi_mean[i, :] - csi_mean[i+1, :] wavelet_long[-1, :] = csi_mean[-1, :] # Lowest freq (CAn) # Clip off the padding and just return the original time window wavelet = np.zeros([max_tmscale, len(vals)]) for i in np.arange(0, max_tmscale): wavelet[i, :] = wavelet_long[i, len(vals): 2*len(vals)] return wavelet, tmscales " 55086,"def generate_basis_set(l, alpha, coeff, r): r""""""Generate a set of basis function objects. Args: l list((tuple[int])): angular momentum numbers of the basis function. 
alpha list((array(float))): exponents of the Gaussian functions forming basis functions coeff list((array(float))): coefficients of the contracted Gaussian functions r list((array(float))): positions of the Gaussian functions forming the basis functions Returns: list(BasisFunction): list containing a set of basis function objects. **Example** >>> l = [(0, 0, 0), (0, 0, 0)] >>> exponents = [[3.42525091, 0.62391373, 0.1688554], [3.42525091, 0.62391373, 0.1688554]] >>> coefficients = [[0.15432897, 0.53532814, 0.44463454], [0.15432897, 0.53532814, 0.44463454]] >>> centers = [[0.0, 0.0, -0.694349], [0.0, 0.0, 0.694349]] >>> basis_set = generate_basis_set(l, exponents, coefficients, centers) >>> print(basis_set) [, ] """""" return [BasisFunction(l[i], alpha[i], coeff[i], r[i]) for i in range(len(l))] ","def generate_basis_set(l, alpha, coeff, r): r""""""Generate a set of basis function objects. Args: l (list[tuple[int]]): angular momentum numbers of the basis function alpha (list[array[float]]): exponents of the Gaussian functions forming basis functions coeff (list[array[float]]): coefficients of the contracted Gaussian functions r (list[array[float]]): positions of the Gaussian functions forming the basis functions Returns: list(BasisFunction): list containing a set of basis function objects. **Example** >>> l = [(0, 0, 0), (0, 0, 0)] >>> exponents = [[3.42525091, 0.62391373, 0.1688554], [3.42525091, 0.62391373, 0.1688554]] >>> coefficients = [[0.15432897, 0.53532814, 0.44463454], [0.15432897, 0.53532814, 0.44463454]] >>> centers = [[0.0, 0.0, -0.694349], [0.0, 0.0, 0.694349]] >>> basis_set = generate_basis_set(l, exponents, coefficients, centers) >>> print(basis_set) [, ] """""" return [BasisFunction(l[i], alpha[i], coeff[i], r[i]) for i in range(len(l))] " 35710,"def _validate_trainable_layers( pretrained: bool, trainable_backbone_layers: Optional[int], max_value: int, default_value: int, ) -> int: # don't freeze any layers if pretrained model or backbone is not used if not pretrained: if trainable_backbone_layers is not None: warnings.warn( ""Changing trainable_backbone_layers has not effect if "" ""neither pretrained nor pretrained_backbone have been set to True, "" f""falling back to trainable_backbone_layers={max_value} so that all layers are trainable"" ) trainable_backbone_layers = max_value # by default freeze first blocks if trainable_backbone_layers is None: trainable_backbone_layers = default_value if trainable_backbone_layers not in range(0, max_value + 1): raise ValueError( f""trainable_layers expected to be in between [0,{max_value}], got {trainable_backbone_layers} "" ) return trainable_backbone_layers ","def _validate_trainable_layers( pretrained: bool, trainable_backbone_layers: Optional[int], max_value: int, default_value: int, ) -> int: # don't freeze any layers if pretrained model or backbone is not used if not pretrained: if trainable_backbone_layers is not None: warnings.warn( ""Changing trainable_backbone_layers has not effect if "" ""neither pretrained nor pretrained_backbone have been set to True, "" f""falling back to trainable_backbone_layers={max_value} so that all layers are trainable"" ) trainable_backbone_layers = max_value # by default freeze first blocks if trainable_backbone_layers is None: trainable_backbone_layers = default_value if trainable_backbone_layers < 0 or trainable_backbone_layers > max_value: raise ValueError( f""trainable_layers expected to be in between [0,{max_value}], got {trainable_backbone_layers} "" ) return trainable_backbone_layers " 
42324,"def setup(bot: Bot) -> None: """"""Load the BotSource cog."""""" bot.add_cog(Resources(bot)) ","def setup(bot: Bot) -> None: """"""Load the Resources cog."""""" bot.add_cog(Resources(bot)) " 45900,"def filter2d( input: torch.Tensor, kernel: torch.Tensor, border_type: str = 'reflect', normalized: bool = False, padding: str = 'same' ) -> torch.Tensor: r""""""Convolve a tensor with a 2d kernel. The function applies a given kernel to a tensor. The kernel is applied independently at each depth channel of the tensor. Before applying the kernel, the function applies padding according to the specified mode so that the output remains in the same shape. Args: input: the input tensor with shape of :math:`(B, C, H, W)`. kernel: the kernel to be convolved with the input tensor. The kernel shape must be :math:`(1, kH, kW)` or :math:`(B, kH, kW)`. border_type: the padding mode to be applied before convolving. The expected modes are: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. normalized: If True, kernel will be L1 normalized. padding: This defines the type of padding. 2 modes available ``'same'`` or ``'valid'`` Return: torch.Tensor: the convolved tensor of same size and numbers of channels as the input with shape :math:`(B, C, H, W)`. Example: >>> input = torch.tensor([[[ ... [0., 0., 0., 0., 0.], ... [0., 0., 0., 0., 0.], ... [0., 0., 5., 0., 0.], ... [0., 0., 0., 0., 0.], ... [0., 0., 0., 0., 0.],]]]) >>> kernel = torch.ones(1, 3, 3) >>> filter2d(input, kernel, padding='same') tensor([[[[0., 0., 0., 0., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 0., 0., 0., 0.]]]]) """""" if not isinstance(input, torch.Tensor): raise TypeError(f""Input border_type is not torch.Tensor. Got {type(input)}"") if not isinstance(kernel, torch.Tensor): raise TypeError(f""Input border_type is not torch.Tensor. Got {type(kernel)}"") if not isinstance(border_type, str): raise TypeError(f""Input border_type is not string. Got {type(kernel)}"") if not isinstance(padding, str): raise TypeError(f""Input padding is not string. Got {type(padding)}"") if not len(input.shape) == 4: raise ValueError(f""Invalid input shape, we expect BxCxHxW. Got: {input.shape}"") if not len(kernel.shape) == 3 and kernel.shape[0] != 1: raise ValueError(f""Invalid kernel shape, we expect 1xHxW. Got: {kernel.shape}"") # prepare kernel b, c, h, w = input.shape tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input) if normalized: tmp_kernel = normalize_kernel2d(tmp_kernel) tmp_kernel = tmp_kernel.expand(-1, c, -1, -1) height, width = tmp_kernel.shape[-2:] # pad the input tensor if padding == 'same': padding_shape: List[int] = _compute_padding([height, width]) input = F.pad(input, padding_shape, mode=border_type) # kernel and input tensor reshape to align element-wise or batch-wise params tmp_kernel = tmp_kernel.reshape(-1, 1, height, width) input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1)) # convolve the tensor with the kernel. output = F.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1) if padding == 'same': return output.view(b, c, h, w) else: return output.view(b, c, h - height + 1, w - width + 1) ","def filter2d( input: torch.Tensor, kernel: torch.Tensor, border_type: str = 'reflect', normalized: bool = False, padding: str = 'same' ) -> torch.Tensor: r""""""Convolve a tensor with a 2d kernel. The function applies a given kernel to a tensor. The kernel is applied independently at each depth channel of the tensor. 
Before applying the kernel, the function applies padding according to the specified mode so that the output remains in the same shape. Args: input: the input tensor with shape of :math:`(B, C, H, W)`. kernel: the kernel to be convolved with the input tensor. The kernel shape must be :math:`(1, kH, kW)` or :math:`(B, kH, kW)`. border_type: the padding mode to be applied before convolving. The expected modes are: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. normalized: If True, kernel will be L1 normalized. padding: This defines the type of padding. 2 modes available ``'same'`` or ``'valid'`` Return: torch.Tensor: the convolved tensor of same size and numbers of channels as the input with shape :math:`(B, C, H, W)`. Example: >>> input = torch.tensor([[[ ... [0., 0., 0., 0., 0.], ... [0., 0., 0., 0., 0.], ... [0., 0., 5., 0., 0.], ... [0., 0., 0., 0., 0.], ... [0., 0., 0., 0., 0.],]]]) >>> kernel = torch.ones(1, 3, 3) >>> filter2d(input, kernel, padding='same') tensor([[[[0., 0., 0., 0., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 0., 0., 0., 0.]]]]) """""" if not isinstance(input, torch.Tensor): raise TypeError(f""Input border_type is not torch.Tensor. Got {type(input)}"") if not isinstance(kernel, torch.Tensor): raise TypeError(f""Input border_type is not torch.Tensor. Got {type(kernel)}"") if not isinstance(border_type, str): raise TypeError(f""Input border_type is not string. Got {type(kernel)}"") if not isinstance(padding, str): raise TypeError(f""Input padding is not string. Got {type(padding)}"") if not len(input.shape) == 4: raise ValueError(f""Invalid input shape, we expect BxCxHxW. Got: {input.shape}"") if not len(kernel.shape) == 3 and kernel.shape[0] != 1: raise ValueError(f""Invalid kernel shape, we expect 1xHxW. Got: {kernel.shape}"") # prepare kernel b, c, h, w = input.shape tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input) if normalized: tmp_kernel = normalize_kernel2d(tmp_kernel) tmp_kernel = tmp_kernel.expand(-1, c, -1, -1) height, width = tmp_kernel.shape[-2:] # pad the input tensor if padding == 'same': padding_shape: List[int] = _compute_padding([height, width]) input = F.pad(input, padding_shape, mode=border_type) # kernel and input tensor reshape to align element-wise or batch-wise params tmp_kernel = tmp_kernel.reshape(-1, 1, height, width) input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1)) # convolve the tensor with the kernel. output = F.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1) if padding == 'same': return output.view(b, c, h, w) else: out = output.view(b, c, h - height + 1, w - width + 1) " 23840,"def cross_building(conanfile=None, skip_x64_x86=False): """""" Check it we are cross building comparing the *build* and *host* settings. Returns ``True`` in the case that we are cross-building. :param conanfile: The current recipe object. Always use ``self``. :param skip_x64_x86: Do not consider cross building when building to 32 bits from 64 bits: x86_64 to x86, sparcv9 to sparc or ppc64 to ppc32 :return: ``True`` if we are cross building, ``False`` otherwise. 
"""""" build_os = conanfile.settings_build.get_safe('os') build_arch = conanfile.settings_build.get_safe('arch') host_os = conanfile.settings.get_safe(""os"") host_arch = conanfile.settings.get_safe(""arch"") if skip_x64_x86 and host_os is not None and (build_os == host_os) and \ host_arch is not None and ((build_arch == ""x86_64"") and (host_arch == ""x86"") or (build_arch == ""sparcv9"") and (host_arch == ""sparc"") or (build_arch == ""ppc64"") and (host_arch == ""ppc32"")): return False if host_os is not None and (build_os != host_os): return True if host_arch is not None and (build_arch != host_arch): return True return False ","def cross_building(conanfile=None, skip_x64_x86=False): """""" Check if we are cross building comparing the *build* and *host* settings. Returns ``True`` in the case that we are cross-building. :param conanfile: The current recipe object. Always use ``self``. :param skip_x64_x86: Do not consider cross building when building to 32 bits from 64 bits: x86_64 to x86, sparcv9 to sparc or ppc64 to ppc32 :return: ``True`` if we are cross building, ``False`` otherwise. """""" build_os = conanfile.settings_build.get_safe('os') build_arch = conanfile.settings_build.get_safe('arch') host_os = conanfile.settings.get_safe(""os"") host_arch = conanfile.settings.get_safe(""arch"") if skip_x64_x86 and host_os is not None and (build_os == host_os) and \ host_arch is not None and ((build_arch == ""x86_64"") and (host_arch == ""x86"") or (build_arch == ""sparcv9"") and (host_arch == ""sparc"") or (build_arch == ""ppc64"") and (host_arch == ""ppc32"")): return False if host_os is not None and (build_os != host_os): return True if host_arch is not None and (build_arch != host_arch): return True return False " 31136,"def get_from_version_and_to_version_bounderies(all_modified_files_paths: set, id_set: dict, modified_metadata_list: set = None) -> Tuple[str, str]: """"""Computes the lowest from version of the modified files, the highest from version and the highest to version of the modified files. In case that max_from_version is higher than max to version - to version will be the the highest default. Args: all_modified_files_paths: All modified files id_set: the content of the id.set_json Returns: (string, string). 
The boundaries of the lowest from version (defaults to 0.0.0) and highest to version (defaults to 99.99.99) """""" modified_metadata_list = modified_metadata_list if modified_metadata_list else set([]) max_to_version = LooseVersion('0.0.0') min_from_version = LooseVersion('99.99.99') max_from_version = LooseVersion('0.0.0') for metadata in modified_metadata_list: pack_metadata_path = os.path.join(tools.pack_name_to_path(metadata), PACKS_PACK_META_FILE_NAME) pack_metadata = get_pack_metadata(pack_metadata_path) from_version = pack_metadata.get('serverMinVersion') to_version = pack_metadata.get('serverMaxVersion') if from_version: min_from_version = min(min_from_version, LooseVersion(from_version)) max_from_version = max(max_from_version, LooseVersion(from_version)) if to_version: max_to_version = max(max_to_version, LooseVersion(to_version)) for artifacts in id_set.values(): for artifact_dict in artifacts: for artifact_details in artifact_dict.values(): if artifact_details.get('file_path') in all_modified_files_paths: from_version = artifact_details.get('fromversion') to_version = artifact_details.get('toversion') if from_version: min_from_version = min(min_from_version, LooseVersion(from_version)) max_from_version = max(max_from_version, LooseVersion(from_version)) if to_version: max_to_version = max(max_to_version, LooseVersion(to_version)) if max_to_version.vstring == '0.0.0' or max_to_version < max_from_version: max_to_version = LooseVersion('99.99.99') if min_from_version.vstring == '99.99.99': min_from_version = LooseVersion('0.0.0') logging.debug(f'modified files are {all_modified_files_paths}') logging.debug(f'lowest from version found is {min_from_version}') logging.debug(f'highest from version found is {max_from_version}') logging.debug(f'highest to version found is {max_to_version}') return min_from_version.vstring, max_to_version.vstring ","def get_from_version_and_to_version_bounderies(all_modified_files_paths: set, id_set: dict, modified_metadata_list: set = set([])) -> Tuple[str, str]: """"""Computes the lowest from version of the modified files, the highest from version and the highest to version of the modified files. In case that max_from_version is higher than max to version - to version will be the the highest default. Args: all_modified_files_paths: All modified files id_set: the content of the id.set_json Returns: (string, string). 
The boundaries of the lowest from version (defaults to 0.0.0) and highest to version (defaults to 99.99.99) """""" modified_metadata_list = modified_metadata_list if modified_metadata_list else set([]) max_to_version = LooseVersion('0.0.0') min_from_version = LooseVersion('99.99.99') max_from_version = LooseVersion('0.0.0') for metadata in modified_metadata_list: pack_metadata_path = os.path.join(tools.pack_name_to_path(metadata), PACKS_PACK_META_FILE_NAME) pack_metadata = get_pack_metadata(pack_metadata_path) from_version = pack_metadata.get('serverMinVersion') to_version = pack_metadata.get('serverMaxVersion') if from_version: min_from_version = min(min_from_version, LooseVersion(from_version)) max_from_version = max(max_from_version, LooseVersion(from_version)) if to_version: max_to_version = max(max_to_version, LooseVersion(to_version)) for artifacts in id_set.values(): for artifact_dict in artifacts: for artifact_details in artifact_dict.values(): if artifact_details.get('file_path') in all_modified_files_paths: from_version = artifact_details.get('fromversion') to_version = artifact_details.get('toversion') if from_version: min_from_version = min(min_from_version, LooseVersion(from_version)) max_from_version = max(max_from_version, LooseVersion(from_version)) if to_version: max_to_version = max(max_to_version, LooseVersion(to_version)) if max_to_version.vstring == '0.0.0' or max_to_version < max_from_version: max_to_version = LooseVersion('99.99.99') if min_from_version.vstring == '99.99.99': min_from_version = LooseVersion('0.0.0') logging.debug(f'modified files are {all_modified_files_paths}') logging.debug(f'lowest from version found is {min_from_version}') logging.debug(f'highest from version found is {max_from_version}') logging.debug(f'highest to version found is {max_to_version}') return min_from_version.vstring, max_to_version.vstring " 23036,"def aggregate_row_groups(parts, stats, chunksize): if (""file_path_0"" not in stats[0]) or (stats[0][""file_path_0""] is None): return parts, stats parts_agg = [] stats_agg = [] chunksize = parse_bytes(chunksize) next_part, next_stat = [parts[0].copy()], stats[0].copy() for i in range(1, len(parts)): stat, part = stats[i], parts[i] if (stat[""file_path_0""] == next_stat[""file_path_0""]) and ( (next_stat[""total_byte_size""] + stat[""total_byte_size""]) <= chunksize ): # Update part list next_part.append(part) # Update Statistics next_stat[""total_byte_size""] += stat[""total_byte_size""] next_stat[""num-rows""] += stat[""num-rows""] for col, col_add in zip(next_stat[""columns""], stat[""columns""]): if col[""name""] != col_add[""name""]: raise ValueError(""Columns are different!!"") if ""null_count"" in col: col[""null_count""] += col_add[""null_count""] if ""min"" in col: col[""min""] = min(col[""min""], col_add[""min""]) if ""max"" in col: col[""max""] = max(col[""max""], col_add[""max""]) else: parts_agg.append(next_part) stats_agg.append(next_stat) next_part, next_stat = [part.copy()], stat.copy() parts_agg.append(next_part) stats_agg.append(next_stat) return parts_agg, stats_agg ","def aggregate_row_groups(parts, stats, chunksize): if not stats[0].get(""file_path_0"", None): return parts, stats parts_agg = [] stats_agg = [] chunksize = parse_bytes(chunksize) next_part, next_stat = [parts[0].copy()], stats[0].copy() for i in range(1, len(parts)): stat, part = stats[i], parts[i] if (stat[""file_path_0""] == next_stat[""file_path_0""]) and ( (next_stat[""total_byte_size""] + stat[""total_byte_size""]) <= chunksize ): # Update part 
list next_part.append(part) # Update Statistics next_stat[""total_byte_size""] += stat[""total_byte_size""] next_stat[""num-rows""] += stat[""num-rows""] for col, col_add in zip(next_stat[""columns""], stat[""columns""]): if col[""name""] != col_add[""name""]: raise ValueError(""Columns are different!!"") if ""null_count"" in col: col[""null_count""] += col_add[""null_count""] if ""min"" in col: col[""min""] = min(col[""min""], col_add[""min""]) if ""max"" in col: col[""max""] = max(col[""max""], col_add[""max""]) else: parts_agg.append(next_part) stats_agg.append(next_stat) next_part, next_stat = [part.copy()], stat.copy() parts_agg.append(next_part) stats_agg.append(next_stat) return parts_agg, stats_agg " 8306,"def find_targets(run_name, owner, job_id=None): lock_args = [ 'teuthology-lock', '--list-targets', '--desc-pattern', '/' + run_name + '/' + (job_id if job_id else ''), '--status', 'up', '--owner', owner ] proc = subprocess.Popen(lock_args, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() out_obj = yaml.safe_load(stdout) if not out_obj or 'targets' not in out_obj: return {} return out_obj ","def find_targets(owner, run_name, job_id=None): lock_args = [ 'teuthology-lock', '--list-targets', '--desc-pattern', '/' + run_name + '/' + (job_id if job_id else ''), '--status', 'up', '--owner', owner ] proc = subprocess.Popen(lock_args, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() out_obj = yaml.safe_load(stdout) if not out_obj or 'targets' not in out_obj: return {} return out_obj " 32493,"def get_xql_query_results_polling_command(client: Client, args: dict) -> Union[CommandResults, list]: """"""Retrieve results of an executed XQL query API executes as a scheduled command. Args: client (Client): The XDR Client. args (dict): The arguments to pass to the API call. Returns: Union[CommandResults, dict]: The command results. """""" # get the query data either from the integration context (if its not the first run) or from the given args. query_id = args.get('query_id', '') parse_result_file_to_context = argToBoolean(args.get('parse_result_file_to_context', 'false')) integration_context, _ = get_integration_context_with_version() command_data_raw = integration_context.get(query_id, args) command_data = json.loads(command_data_raw) if type(command_data_raw) is str\ else integration_context.get(query_id, args) command_name = command_data.get('command_name', demisto.command()) interval_in_secs = int(args.get('interval_in_seconds', 10)) max_fields = arg_to_number(args.get('max_fields', 20)) if max_fields is None: raise DemistoException('Please provide a valid number for max_fields argument.') outputs, file_data = get_xql_query_results(client, args) # get query results with query_id outputs.update({'query_name': command_data.get('query_name', '')}) outputs_prefix = get_outputs_prefix(command_name) command_results = CommandResults(outputs_prefix=outputs_prefix, outputs_key_field='execution_id', outputs=outputs, raw_response=copy.deepcopy(outputs)) # if there are more than 1000 results if file_data: if not parse_result_file_to_context: # Extracts the results into a file only file = fileResult(filename=""results.gz"", data=file_data) command_results.readable_output = 'More than 1000 results were retrieved, see the compressed gzipped file below.' 
remove_query_id_from_integration_context(query_id) return [file, command_results] else: # Parse the results to context: data = gzip.decompress(file_data).decode() outputs['results'] = [json.loads(line) for line in data.split(""\n"") if len(line) > 0] # if status is pending, in versions above 6.2.0, the command will be called again in the next run until success. if outputs.get('status') == 'PENDING': if not is_demisto_version_ge('6.2.0'): # only 6.2.0 version and above support polling command. remove_query_id_from_integration_context(query_id) return command_results scheduled_command = ScheduledCommand(command='xdr-xql-get-query-results', next_run_in_seconds=interval_in_secs, args=args, timeout_in_seconds=600) command_results.scheduled_command = scheduled_command command_results.readable_output = 'Query is still running, it may take a little while...' return command_results results_to_format = outputs.pop('results') # create Human Readable output query = command_data.get('query', '') time_frame = command_data.get('time_frame') extra_for_human_readable = ({'query': query, 'time_frame': time_frame}) outputs.update(extra_for_human_readable) command_results.readable_output = tableToMarkdown('General Information', outputs, headerTransform=string_to_table_header, removeNull=True) [outputs.pop(key) for key in list(extra_for_human_readable.keys())] # if no fields were given in the query then the default fields are returned (without empty fields). if results_to_format: formatted_list = format_results(results_to_format, remove_empty_fields=False) \ if 'fields' in query else format_results(results_to_format) if formatted_list and command_name == 'xdr-xql-generic-query' and len(formatted_list[0].keys()) > max_fields: raise DemistoException('The number of fields per result has exceeded the maximum number of allowed fields, ' 'please select specific fields in the query or increase the maximum number of ' 'allowed fields.') outputs.update({'results': formatted_list}) command_results.outputs = outputs command_results.readable_output += tableToMarkdown('Data Results', outputs.get('results'), headerTransform=string_to_table_header) remove_query_id_from_integration_context(query_id) return command_results ","def get_xql_query_results_polling_command(client: Client, args: dict) -> Union[CommandResults, list]: """"""Retrieve results of an executed XQL query API executes as a scheduled command. Args: client (Client): The XDR Client. args (dict): The arguments to pass to the API call. Returns: Union[CommandResults, dict]: The command results. """""" # get the query data either from the integration context (if its not the first run) or from the given args. 
query_id = args.get('query_id', '') parse_result_file_to_context = argToBoolean(args.get('parse_result_file_to_context', 'false')) integration_context, _ = get_integration_context_with_version() command_data_raw = integration_context.get(query_id, args) command_data = json.loads(command_data_raw) if isinstance(command_data_raw, str)\ else integration_context.get(query_id, args) command_name = command_data.get('command_name', demisto.command()) interval_in_secs = int(args.get('interval_in_seconds', 10)) max_fields = arg_to_number(args.get('max_fields', 20)) if max_fields is None: raise DemistoException('Please provide a valid number for max_fields argument.') outputs, file_data = get_xql_query_results(client, args) # get query results with query_id outputs.update({'query_name': command_data.get('query_name', '')}) outputs_prefix = get_outputs_prefix(command_name) command_results = CommandResults(outputs_prefix=outputs_prefix, outputs_key_field='execution_id', outputs=outputs, raw_response=copy.deepcopy(outputs)) # if there are more than 1000 results if file_data: if not parse_result_file_to_context: # Extracts the results into a file only file = fileResult(filename=""results.gz"", data=file_data) command_results.readable_output = 'More than 1000 results were retrieved, see the compressed gzipped file below.' remove_query_id_from_integration_context(query_id) return [file, command_results] else: # Parse the results to context: data = gzip.decompress(file_data).decode() outputs['results'] = [json.loads(line) for line in data.split(""\n"") if len(line) > 0] # if status is pending, in versions above 6.2.0, the command will be called again in the next run until success. if outputs.get('status') == 'PENDING': if not is_demisto_version_ge('6.2.0'): # only 6.2.0 version and above support polling command. remove_query_id_from_integration_context(query_id) return command_results scheduled_command = ScheduledCommand(command='xdr-xql-get-query-results', next_run_in_seconds=interval_in_secs, args=args, timeout_in_seconds=600) command_results.scheduled_command = scheduled_command command_results.readable_output = 'Query is still running, it may take a little while...' return command_results results_to_format = outputs.pop('results') # create Human Readable output query = command_data.get('query', '') time_frame = command_data.get('time_frame') extra_for_human_readable = ({'query': query, 'time_frame': time_frame}) outputs.update(extra_for_human_readable) command_results.readable_output = tableToMarkdown('General Information', outputs, headerTransform=string_to_table_header, removeNull=True) [outputs.pop(key) for key in list(extra_for_human_readable.keys())] # if no fields were given in the query then the default fields are returned (without empty fields). 
if results_to_format: formatted_list = format_results(results_to_format, remove_empty_fields=False) \ if 'fields' in query else format_results(results_to_format) if formatted_list and command_name == 'xdr-xql-generic-query' and len(formatted_list[0].keys()) > max_fields: raise DemistoException('The number of fields per result has exceeded the maximum number of allowed fields, ' 'please select specific fields in the query or increase the maximum number of ' 'allowed fields.') outputs.update({'results': formatted_list}) command_results.outputs = outputs command_results.readable_output += tableToMarkdown('Data Results', outputs.get('results'), headerTransform=string_to_table_header) remove_query_id_from_integration_context(query_id) return command_results " 22672,"def source_container_exit(task_args: dict): """"""Run a source container build. :param task_args: CLI arguments for a source-container-exit task """""" params = SourceBuildTaskParams.from_cli_args(task_args) task = SourceBuildExitTask(params) return task.execute() ","def source_container_exit(task_args: dict): """"""Run source container exit steps. :param task_args: CLI arguments for a source-container-exit task """""" params = SourceBuildTaskParams.from_cli_args(task_args) task = SourceBuildExitTask(params) return task.execute() " 27056,"def render_template_file( template_name: str, context: Dict[str, Any], autoescape: bool = True, keep_trailing_newline: bool = False, ) -> str: """""" Renders template based on it's name. Reads the template from filein current dir. :param template_name: name of the template to use :param context: Jinja2 context :param autoescape: Whether to autoescape HTML :param keep_trailing_newline: Whether to keep the newline in rendered output :return: rendered template """""" import jinja2 template_loader = jinja2.FileSystemLoader(searchpath=MY_DIR_PATH) template_env = jinja2.Environment( loader=template_loader, undefined=jinja2.StrictUndefined, autoescape=autoescape, keep_trailing_newline=keep_trailing_newline, ) template = template_env.get_template(template_name) content: str = template.render(context) return content ","def render_template_file( template_name: str, context: Dict[str, Any], autoescape: bool = True, keep_trailing_newline: bool = False, ) -> str: """""" Renders template based on its name. Reads the template from file in the current dir. :param template_name: name of the template to use :param context: Jinja2 context :param autoescape: Whether to autoescape HTML :param keep_trailing_newline: Whether to keep the newline in rendered output :return: rendered template """""" import jinja2 template_loader = jinja2.FileSystemLoader(searchpath=MY_DIR_PATH) template_env = jinja2.Environment( loader=template_loader, undefined=jinja2.StrictUndefined, autoescape=autoescape, keep_trailing_newline=keep_trailing_newline, ) template = template_env.get_template(template_name) content: str = template.render(context) return content " 12503,"def typeshed_py_version(options: Options) -> Tuple[int, int]: """"""Return Python version used for checking whether module supports typeshed."""""" # Typeshed no longer covers Python 3.x versions before 3.6, so 3.6 is # the earliest we can support. return max(options.python_version, (3, 6)) ","def typeshed_py_version(options: Options) -> Tuple[int, int]: """"""Return Python version used for checking whether module supports typeshed."""""" # Typeshed no longer covers Python 3.x versions before 3.6, so 3.6 is # the earliest we can support. 
return max(options.python_version, (3, 7)) " 19937,"def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), name=dict(type='str', required=True), state=dict(type='str', required=True, choices=['present', 'absent', 'list']), details=dict(type='bool', required=False, default=False), size=dict(type='str', required=False), min_size=dict(type='str', required=False), pg_num=dict(type='str', required=False, default=None), pgp_num=dict(type='str', required=False, default=None), pg_autoscale_mode=dict(type='str', required=False, default='on'), target_size_ratio=dict(type='str', required=False, default=None), pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']), erasure_profile=dict(type='str', required=False, default='default'), rule_name=dict(type='str', required=False, default=None), expected_num_objects=dict(type='str', required=False, default=""0""), application=dict(type='str', required=False, default=None), ) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, ) # Gather module parameters in variables cluster = module.params.get('cluster') name = module.params.get('name') state = module.params.get('state') details = module.params.get('details') pg_num = module.params.get('pg') pgp_num = module.params.get('pgp') pg_autoscale_mode = module.params.get('pg_autoscale_mode') target_size_ratio = module.params.get('target_size_ratio') application = module.params.get('application') if module.params.get('pg_autoscale_mode').lower() in ['true', 'on', 'yes']: pg_autoscale_mode = 'on' elif module.params.get('pg_autoscale_mode').lower() in ['false', 'off', 'no']: pg_autoscale_mode = 'off' else: pg_autoscale_mode = 'warn' if module.params.get('pool_type') == '1': pool_type = 'replicated' elif module.params.get('pool_type') == '3': pool_type = 'erasure' else: pool_type = module.params.get('pool_type') if module.params.get('rule_name') == None: rule_name = 'replicated_rule' if pool_type == 'replicated' else None else: rule_name = module.params.get('rule_name') erasure_profile = module.params.get('erasure_profile') expected_num_objects = module.params.get('expected_num_objects') user_pool_config = { 'pool_name': { 'value': name }, 'pg_num': { 'value': pg_num, 'cli_set_opt': 'pg_num' }, 'pgp_num': { 'value': pgp_num, 'cli_set_opt': 'pgp_num' }, 'pg_autoscale_mode': { 'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode' }, 'target_size_ratio': { 'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio' }, 'application': {'value': application }, 'type': { 'value': pool_type }, 'erasure_profile': { 'value': erasure_profile }, 'crush_rule': { 'value': rule_name, 'cli_set_opt': 'crush_rule' }, 'expected_num_objects': { 'value': expected_num_objects } } if module.check_mode: return dict( changed=False, stdout='', stderr='', rc='', start='', end='', delta='', ) startd = datetime.datetime.now() changed = False # will return either the image name or None container_image = is_containerized() user = ""client.admin"" keyring_filename = cluster + '.' 
+ user + '.keyring' user_key = os.path.join(""/etc/ceph/"", keyring_filename) def_opt = { 'size': { 'conf_name': 'osd_pool_default_size', 'cli_set_opt': 'size' }, 'min_size': { 'conf_name': 'osd_pool_default_min_size', 'cli_set_opt': 'min_size' }, 'pg_num': { 'conf_name': 'osd_pool_default_pg_num', 'cli_set_opt': 'pg_num' }, 'pgp_num': { 'conf_name': 'osd_pool_default_pgp_num', 'cli_set_opt': 'pgp_num' } } if state == ""present"": rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image) if rc == 0: for k, v in def_opt.items(): if module.params[k] == None: user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']} else: user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']} rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image)) if rc == 0: running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image) user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' } delta = compare_pool_config(user_pool_config, running_pool_details[2]) if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == """" and 'size' not in delta.keys(): rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image) if rc == 0: changed = True else: out = ""Pool {} already exists and there is nothing to update."".format(name) else: rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image)) if user_pool_config['application']['value'] != None: _rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image)) changed = True elif state == ""list"": rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image)) if rc != 0: out = ""Couldn't list pool(s) present on the cluster"" elif state == ""absent"": rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image)) if rc == 0: rc, cmd, out, err = exec_commands(module, remove_pool(cluster, name, user, user_key, container_image=container_image)) changed = True else: rc = 0 out = ""Pool {} doesn't exist"".format(name) exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) ","def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), name=dict(type='str', required=True), state=dict(type='str', required=True, choices=['present', 'absent', 'list']), details=dict(type='bool', required=False, default=False), size=dict(type='str', required=False), min_size=dict(type='str', required=False), pg_num=dict(type='str', required=False, default=None), pgp_num=dict(type='str', required=False, default=None), pg_autoscale_mode=dict(type='str', required=False, default='on'), target_size_ratio=dict(type='str', required=False, default=None), pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']), erasure_profile=dict(type='str', required=False, default='default'), rule_name=dict(type='str', required=False, default=None), 
expected_num_objects=dict(type='str', required=False, default=""0""), application=dict(type='str', required=False, default=None), ) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, ) # Gather module parameters in variables cluster = module.params.get('cluster') name = module.params.get('name') state = module.params.get('state') details = module.params.get('details') pg_num = module.params.get('pg') pgp_num = module.params.get('pgp') pg_autoscale_mode = module.params.get('pg_autoscale_mode') target_size_ratio = module.params.get('target_size_ratio') application = module.params.get('application') if module.params.get('pg_autoscale_mode').lower() in ['true', 'on', 'yes']: pg_autoscale_mode = 'on' elif module.params.get('pg_autoscale_mode').lower() in ['false', 'off', 'no']: pg_autoscale_mode = 'off' else: pg_autoscale_mode = 'warn' if module.params.get('pool_type') == '1': pool_type = 'replicated' elif module.params.get('pool_type') == '3': pool_type = 'erasure' else: pool_type = module.params.get('pool_type') if module.params.get('rule_name') == None: rule_name = 'replicated_rule' if pool_type == 'replicated' else None else: rule_name = module.params.get('rule_name') erasure_profile = module.params.get('erasure_profile') expected_num_objects = module.params.get('expected_num_objects') user_pool_config = { 'pool_name': { 'value': name }, 'pg_num': { 'value': pg_num, 'cli_set_opt': 'pg_num' }, 'pgp_num': { 'value': pgp_num, 'cli_set_opt': 'pgp_num' }, 'pg_autoscale_mode': { 'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode' }, 'target_size_ratio': { 'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio' }, 'application': {'value': application }, 'type': { 'value': pool_type }, 'erasure_profile': { 'value': erasure_profile }, 'crush_rule': { 'value': rule_name, 'cli_set_opt': 'crush_rule' }, 'expected_num_objects': { 'value': expected_num_objects } } if module.check_mode: return dict( changed=False, stdout='', stderr='', rc='', start='', end='', delta='', ) startd = datetime.datetime.now() changed = False # will return either the image name or None container_image = is_containerized() user = ""client.admin"" keyring_filename = cluster + '.' 
+ user + '.keyring' user_key = os.path.join(""/etc/ceph/"", keyring_filename) def_opt = { 'size': { 'conf_name': 'osd_pool_default_size', 'cli_set_opt': 'size' }, 'min_size': { 'conf_name': 'osd_pool_default_min_size', 'cli_set_opt': 'min_size' }, 'pg_num': { 'conf_name': 'osd_pool_default_pg_num', 'cli_set_opt': 'pg_num' }, 'pgp_num': { 'conf_name': 'osd_pool_default_pgp_num', 'cli_set_opt': 'pgp_num' } } if state == ""present"": rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image) if rc == 0: for k, v in def_opt.items(): if module.params[k] == None: user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']} else: user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']} rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image)) if rc == 0: running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image) user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' } delta = compare_pool_config(user_pool_config, running_pool_details[2]) if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == """" and 'size' not in delta.keys(): rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image) if rc == 0: changed = True else: out = ""Pool {} already exists and there is nothing to update."".format(name) else: rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image)) if user_pool_config['application']['value'] != None: _rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image)) changed = True elif state == ""list"": rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image)) if rc != 0: out = ""Couldn't list pool(s) present on the cluster"" elif state == ""absent"": rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image)) if rc == 0: rc, cmd, out, err = exec_commands(module, remove_pool(cluster, name, user, user_key, container_image=container_image)) changed = True else: rc = 0 out = ""Skipped, since pool {} doesn't exist"".format(name) exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) " 31804,"def build_indicators_iterator(attributes: Dict[str, Any], url: Optional[str]) -> List[Dict[str, Any]]: """""" Creates a list of valid indicators types to be created Args: attributes: List of attributes returned from MISP url: Feed URL Returns: List of indicators and their types """""" indicators_iterator = [] try: attributes_list: List[Dict[str, Any]] = attributes['response']['Attribute'] for attribute in attributes_list: if get_attribute_indicator_type(attribute): indicators_iterator.append({ 'value': attribute, 'type': get_attribute_indicator_type(attribute), 'raw_type': attribute['type'], 'FeedURL': url, }) except KeyError as err: demisto.debug(str(err)) raise KeyError(f'Could not parse returned data as attributes list. 
\n\nError massage: {err}') return indicators_iterator ","def build_indicators_iterator(attributes: Dict[str, Any], url: Optional[str]) -> List[Dict[str, Any]]: """""" Creates a list of valid indicators types to be created Args: attributes: List of attributes returned from MISP url: Feed URL Returns: List of indicators and their types """""" indicators_iterator = [] try: attributes_list: List[Dict[str, Any]] = attributes['response']['Attribute'] for attribute in attributes_list: if get_attribute_indicator_type(attribute): indicators_iterator.append({ 'value': attribute, 'type': get_attribute_indicator_type(attribute), 'raw_type': attribute['type'], 'FeedURL': url, }) except KeyError as err: demisto.debug(str(err)) raise KeyError(f'Could not parse returned data as attributes list.\nError massage: {err}') return indicators_iterator " 43171,"def from_cugraph(cugraph_graph): """"""Create a graph from a cugraph graph and return. Parameters ---------- cugraph_graph : cugraph.Graph The cugraph graph holding the graph structure and the node/edge attributes. If the input graph is undirected, DGL converts it to a directed graph by :func:`cugraph.Graph.to_directed`. Returns ------- DGLGraph The created graph. Examples -------- The following example uses PyTorch backend. >>> import dgl >>> import cugraph >>> import cudf Create a cugraph graph. >>> cugraph_g = cugraph.Graph(directed=True) >>> df = cudf.DataFrame({""source"":[0, 1, 2, 3], ""destination"":[1, 2, 3, 0]}) >>> cugraph_g.from_cudf_edgelist(df) Convert it into a DGLGraph >>> g = dgl.from_cugraph(cugraph_g) >>> g.edges() (tensor([1, 2, 3, 0], device='cuda:0'), tensor([2, 3, 0, 1], device='cuda:0')) """""" if not cugraph_graph.is_directed(): cugraph_graph = cugraph_graph.to_directed() edges = cugraph_graph.edges() src_t = F.zerocopy_from_dlpack(edges['src'].to_dlpack()) dst_t = F.zerocopy_from_dlpack(edges['dst'].to_dlpack()) g = graph((src_t,dst_t)) return g ","def from_cugraph(cugraph_graph): """"""Create a graph from a :class:`cugraph.Graph` object. Parameters ---------- cugraph_graph : cugraph.Graph The cugraph graph holding the graph structure and the node/edge attributes. If the input graph is undirected, DGL converts it to a directed graph by :func:`cugraph.Graph.to_directed`. Returns ------- DGLGraph The created graph. Examples -------- The following example uses PyTorch backend. >>> import dgl >>> import cugraph >>> import cudf Create a cugraph graph. >>> cugraph_g = cugraph.Graph(directed=True) >>> df = cudf.DataFrame({""source"":[0, 1, 2, 3], ""destination"":[1, 2, 3, 0]}) >>> cugraph_g.from_cudf_edgelist(df) Convert it into a DGLGraph >>> g = dgl.from_cugraph(cugraph_g) >>> g.edges() (tensor([1, 2, 3, 0], device='cuda:0'), tensor([2, 3, 0, 1], device='cuda:0')) """""" if not cugraph_graph.is_directed(): cugraph_graph = cugraph_graph.to_directed() edges = cugraph_graph.edges() src_t = F.zerocopy_from_dlpack(edges['src'].to_dlpack()) dst_t = F.zerocopy_from_dlpack(edges['dst'].to_dlpack()) g = graph((src_t,dst_t)) return g " 37807,"def evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str: value: str = node.word for part in node.parts: part_string = context.input[part.pos[0]:part.pos[1]] part_value = evaluate_node(part, context=context) if part_string not in value: raise RuntimeError( f'bash parse failed. part ""{part_string}"" not found in ""{value}"". ' 'Word was ""{node.word}"". 
Full input was ""{context.input}""' ) value = value.replace(part_string, part_value, 1) return value ","def evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str: value: str = node.word for part in node.parts: part_string = context.input[part.pos[0]:part.pos[1]] part_value = evaluate_node(part, context=context) if part_string not in value: raise RuntimeError( f'bash parse failed. part ""{part_string}"" not found in ""{value}"". ' f'Word was ""{node.word}"". Full input was ""{context.input}""' ) value = value.replace(part_string, part_value, 1) return value " 51549,"def main(args=sys.argv): """"""Main CLI entrypoint"""""" lgr.log(5, ""Starting main(%r)"", args) # record that we came in via the cmdline datalad.__api = 'cmdline' completing = ""_ARGCOMPLETE"" in os.environ if completing: import shlex # TODO support posix=False too? args = shlex.split(os.environ.get('COMP_LINE')) or args if _on_msys_tainted_paths(): # Possibly present DataLadRIs were stripped of a leading / from .helpers import _fix_datalad_ri args = [_fix_datalad_ri(s) for s in args] # PYTHON_ARGCOMPLETE_OK # TODO possibly construct a dedicated parser just for autocompletion # rather than lobotomizing the normal one parser = setup_parser(args, completing=completing) try: import argcomplete argcomplete.autocomplete(parser) except ImportError: pass # parse cmd args lgr.debug(""Parsing known args among %r"", args) cmdlineargs, unparsed_args = parser.parse_known_args(args[1:]) # did the parser tell us what command to run? has_func = hasattr(cmdlineargs, 'func') and cmdlineargs.func is not None if unparsed_args: if has_func: lgr.error('unknown argument{}: {}'.format( 's' if len(unparsed_args) > 1 else '', unparsed_args if len(unparsed_args) > 1 else unparsed_args[0]) ) cmdlineargs.subparser.print_usage() sys.exit(1) else: # store all unparsed arguments cmdlineargs.datalad_unparsed_args = unparsed_args # pull config overrides from cmdline args and put in effect if cmdlineargs.cfg_overrides is not None: from .helpers import _parse_overrides_from_cmdline datalad.cfg.overrides.update( _parse_overrides_from_cmdline(cmdlineargs) ) # enable overrides datalad.cfg.reload(force=True) if 'datalad.runtime.librarymode' in datalad.cfg: datalad.enable_librarymode() if cmdlineargs.change_path is not None: from datalad.utils import chpwd for path in cmdlineargs.change_path: chpwd(path) # check argparse could determine what commands needs to be executed if not has_func: # just let argparser spit out its error, since there is smth wrong parser.parse_args(args) # if that one didn't puke -- we should parser.print_usage() lgr.error(""Please specify the command"") # matches exit code for InsufficientArgumentsError sys.exit(2) # execute the command, either with a debugger catching # a crash, or with a simplistic exception handler. # note that result rendering is happening in the # execution handler, when the command-generator is unwound ret = _run_with_debugger(cmdlineargs) \ if cmdlineargs.common_debug or cmdlineargs.common_idebug \ else _run_with_exception_handler(cmdlineargs) # all good, not strictly needed, but makes internal testing easier sys.exit(0) ","def main(args=sys.argv): """"""Main CLI entrypoint"""""" lgr.log(5, ""Starting main(%r)"", args) # record that we came in via the cmdline datalad.__api = 'cmdline' completing = ""_ARGCOMPLETE"" in os.environ if completing and 'COMP_LINE' in os.environ: import shlex # TODO support posix=False too? 
args = shlex.split(os.environ['COMP_LINE']) or args if completing and (COMP_LINE := os.environ.get('COMP_LINE')): import shlex # TODO support posix=False too? args = shlex.split(COMP_LINE) or args if _on_msys_tainted_paths(): # Possibly present DataLadRIs were stripped of a leading / from .helpers import _fix_datalad_ri args = [_fix_datalad_ri(s) for s in args] # PYTHON_ARGCOMPLETE_OK # TODO possibly construct a dedicated parser just for autocompletion # rather than lobotomizing the normal one parser = setup_parser(args, completing=completing) try: import argcomplete argcomplete.autocomplete(parser) except ImportError: pass # parse cmd args lgr.debug(""Parsing known args among %r"", args) cmdlineargs, unparsed_args = parser.parse_known_args(args[1:]) # did the parser tell us what command to run? has_func = hasattr(cmdlineargs, 'func') and cmdlineargs.func is not None if unparsed_args: if has_func: lgr.error('unknown argument{}: {}'.format( 's' if len(unparsed_args) > 1 else '', unparsed_args if len(unparsed_args) > 1 else unparsed_args[0]) ) cmdlineargs.subparser.print_usage() sys.exit(1) else: # store all unparsed arguments cmdlineargs.datalad_unparsed_args = unparsed_args # pull config overrides from cmdline args and put in effect if cmdlineargs.cfg_overrides is not None: from .helpers import _parse_overrides_from_cmdline datalad.cfg.overrides.update( _parse_overrides_from_cmdline(cmdlineargs) ) # enable overrides datalad.cfg.reload(force=True) if 'datalad.runtime.librarymode' in datalad.cfg: datalad.enable_librarymode() if cmdlineargs.change_path is not None: from datalad.utils import chpwd for path in cmdlineargs.change_path: chpwd(path) # check argparse could determine what commands needs to be executed if not has_func: # just let argparser spit out its error, since there is smth wrong parser.parse_args(args) # if that one didn't puke -- we should parser.print_usage() lgr.error(""Please specify the command"") # matches exit code for InsufficientArgumentsError sys.exit(2) # execute the command, either with a debugger catching # a crash, or with a simplistic exception handler. # note that result rendering is happening in the # execution handler, when the command-generator is unwound ret = _run_with_debugger(cmdlineargs) \ if cmdlineargs.common_debug or cmdlineargs.common_idebug \ else _run_with_exception_handler(cmdlineargs) # all good, not strictly needed, but makes internal testing easier sys.exit(0) " 11081,"def _get_builtin_permissions(opts): """""" Return (codename, name) for all autogenerated permissions. By default, this is ('add', 'change', 'delete', 'view') """""" perms = [] for action in opts.default_permissions: perms.append(( get_permission_codename(action, opts), _('Can %(action)s %(verbose_name)s' % {'action': action, 'verbose_name': opts.verbose_name_raw}) )) return perms ","def _get_builtin_permissions(opts): """""" Return (codename, name) for all autogenerated permissions. 
By default, this is ('add', 'change', 'delete', 'view') """""" perms = [] for action in opts.default_permissions: perms.append(( get_permission_codename(action, opts), _('Can %(action)s %(verbose_name)s') % {'action': action, 'verbose_name': opts.verbose_name_raw} )) return perms " 19431,"def setup_parser(subparser): subparser.add_argument( 'url', nargs='?', help=""url of package archive"") subparser.add_argument( '--keep-stage', action='store_true', help=""don't clean up staging area when command completes"") subparser.add_argument( '-n', '--name', help=""name of the package to create"") subparser.add_argument( '-t', '--template', metavar='TEMPLATE', choices=sorted(templates.keys()), help=""build system template to use. options: %(choices)s"") subparser.add_argument( '-r', '--repo', help=""path to a repository where the package should be created"") subparser.add_argument( '-N', '--namespace', help=""specify a namespace for the package. must be the namespace of "" ""a repository registered with Spack"") subparser.add_argument( '-f', '--force', action='store_true', help=""overwrite any existing package file with the same name"") subparser.add_argument( '--skip-editor', action='store_true', help=""skip the edit session for the package (e.g., automation)"") subparser.add_argument( '-b', '--batch', action='store_true', help=""don't ask which versions to checksum"") subparser.add_argument( '-g', '--git', action='store', nargs='?', default='#AUTO-GIT-URL#', help=""use git to download source from repository passed in url argument"") subparser.add_argument( '-V', '--version', help='override derived package version') subparser.add_argument('-B', '--branch', help=""specify branch(es) of git repository. "" ""Separate multiple branches with space. "" ""Not guaranteed to be reproducible, use "" ""`--commit` when possible. "" ""Only used for git URLs."", nargs='+') subparser.add_argument('-T', '--tag', help=""specify tag(s) of git repository. "" ""Separate multiple tags with space. "" ""Not guaranteed to be reproducible, "" ""use `--commit` when possible. "" ""Only used for git URLs."", nargs='+') subparser.add_argument('-C', '--commit', help=""specify commit id(s) of git repository. "" ""Separate multiple commit ids with space. "" ""Only used for git URLs."", nargs='+') ","def setup_parser(subparser): subparser.add_argument( 'url', nargs='?', help=""url of package archive"") subparser.add_argument( '--keep-stage', action='store_true', help=""don't clean up staging area when command completes"") subparser.add_argument( '-n', '--name', help=""name of the package to create"") subparser.add_argument( '-t', '--template', metavar='TEMPLATE', choices=sorted(templates.keys()), help=""build system template to use. options: %(choices)s"") subparser.add_argument( '-r', '--repo', help=""path to a repository where the package should be created"") subparser.add_argument( '-N', '--namespace', help=""specify a namespace for the package. 
must be the namespace of "" ""a repository registered with Spack"") subparser.add_argument( '-f', '--force', action='store_true', help=""overwrite any existing package file with the same name"") subparser.add_argument( '--skip-editor', action='store_true', help=""skip the edit session for the package (e.g., automation)"") subparser.add_argument( '-b', '--batch', action='store_true', help=""don't ask which versions to checksum"") subparser.add_argument( '-g', '--git', action='store', nargs='?', default='#AUTO-GIT-URL#', help=""use git to download source from repository passed in url argument"") subparser.add_argument( '-V', '--version', help='override derived package version') subparser.add_argument('-B', '--branch', help=""specify branch(es) of git repository. "" ""Separate multiple branches with space. "" ""Not guaranteed to be reproducible, use "" ""`--commit` when possible. "" ""Only used for git URLs."", nargs='+') subparser.add_argument('-T', '--tag', help=""specify tag(s) of git repository. "" ""Separate multiple tags with space."", nargs='+') subparser.add_argument('-C', '--commit', help=""specify commit id(s) of git repository. "" ""Separate multiple commit ids with space. "" ""Only used for git URLs."", nargs='+') " 30618,"def format_head_or_base_outputs(head_or_base: dict = {}) -> dict: """"""Take GitHub API head or base branch data and format to expected context outputs Args: head_or_base (dict): head or base branch data returned from GitHub API Returns: (dict): head or base branch object formatted to expected context outputs """""" head_or_base_user = head_or_base.get('user', {}) ec_head_or_base_user = format_user_outputs(head_or_base_user) head_or_base_repo = head_or_base.get('repo', {}) if head_or_base_repo: head_or_base_repo_owner = head_or_base_repo.get('owner', {}) else: # in case of an a deleted fork head_or_base_repo = {} head_or_base_repo_owner = { ""Login"": ""Unknown"" } ec_head_or_base_repo_owner = format_user_outputs(head_or_base_repo_owner) ec_head_repo = { 'ID': head_or_base_repo.get('id'), 'NodeID': head_or_base_repo.get('node_id'), 'Name': head_or_base_repo.get('name'), 'FullName': head_or_base_repo.get('full_name'), 'Owner': ec_head_or_base_repo_owner, 'Private': head_or_base_repo.get('private'), 'Description': head_or_base_repo.get('description'), 'Fork': head_or_base_repo.get('fork'), 'Language': head_or_base_repo.get('language'), 'ForksCount': head_or_base_repo.get('forks_count'), 'StargazersCount': head_or_base_repo.get('stargazers_count'), 'WatchersCount': head_or_base_repo.get('watchers_count'), 'Size': head_or_base_repo.get('size'), 'DefaultBranch': head_or_base_repo.get('default_branch'), 'OpenIssuesCount': head_or_base_repo.get('open_issues_count'), 'Topics': head_or_base_repo.get('topics'), 'HasIssues': head_or_base_repo.get('has_issues'), 'HasProjects': head_or_base_repo.get('has_projects'), 'HasWiki': head_or_base_repo.get('has_wiki'), 'HasPages': head_or_base_repo.get('has_pages'), 'HasDownloads': head_or_base_repo.get('has_downloads'), 'Archived': head_or_base_repo.get('archived'), 'Disabled': head_or_base_repo.get('disabled'), 'PushedAt': head_or_base_repo.get('pushed_at'), 'CreatedAt': head_or_base_repo.get('created_at'), 'UpdatedAt': head_or_base_repo.get('updated_at'), 'AllowRebaseMerge': head_or_base_repo.get('allow_rebase_merge'), 'AllowSquashMerge': head_or_base_repo.get('allow_squash_merge'), 'AllowMergeCommit': head_or_base_repo.get('allow_merge_commit'), 'SucscribersCount': head_or_base_repo.get('subscribers_count') } ec_head_or_base = { 
'Label': head_or_base.get('label'), 'Ref': head_or_base.get('ref'), 'SHA': head_or_base.get('sha'), 'User': ec_head_or_base_user, 'Repo': ec_head_repo, } return ec_head_or_base ","def format_head_or_base_outputs(head_or_base: dict = {}) -> dict: """"""Take GitHub API head or base branch data and format to expected context outputs Args: head_or_base (dict): head or base branch data returned from GitHub API Returns: (dict): head or base branch object formatted to expected context outputs """""" head_or_base_user = head_or_base.get('user', {}) ec_head_or_base_user = format_user_outputs(head_or_base_user) head_or_base_repo = head_or_base.get('repo', {}) if head_or_base_repo: head_or_base_repo_owner = head_or_base_repo.get('owner', {}) else: # in case of a deleted fork head_or_base_repo = {} head_or_base_repo_owner = { ""Login"": ""Unknown"" } ec_head_or_base_repo_owner = format_user_outputs(head_or_base_repo_owner) ec_head_repo = { 'ID': head_or_base_repo.get('id'), 'NodeID': head_or_base_repo.get('node_id'), 'Name': head_or_base_repo.get('name'), 'FullName': head_or_base_repo.get('full_name'), 'Owner': ec_head_or_base_repo_owner, 'Private': head_or_base_repo.get('private'), 'Description': head_or_base_repo.get('description'), 'Fork': head_or_base_repo.get('fork'), 'Language': head_or_base_repo.get('language'), 'ForksCount': head_or_base_repo.get('forks_count'), 'StargazersCount': head_or_base_repo.get('stargazers_count'), 'WatchersCount': head_or_base_repo.get('watchers_count'), 'Size': head_or_base_repo.get('size'), 'DefaultBranch': head_or_base_repo.get('default_branch'), 'OpenIssuesCount': head_or_base_repo.get('open_issues_count'), 'Topics': head_or_base_repo.get('topics'), 'HasIssues': head_or_base_repo.get('has_issues'), 'HasProjects': head_or_base_repo.get('has_projects'), 'HasWiki': head_or_base_repo.get('has_wiki'), 'HasPages': head_or_base_repo.get('has_pages'), 'HasDownloads': head_or_base_repo.get('has_downloads'), 'Archived': head_or_base_repo.get('archived'), 'Disabled': head_or_base_repo.get('disabled'), 'PushedAt': head_or_base_repo.get('pushed_at'), 'CreatedAt': head_or_base_repo.get('created_at'), 'UpdatedAt': head_or_base_repo.get('updated_at'), 'AllowRebaseMerge': head_or_base_repo.get('allow_rebase_merge'), 'AllowSquashMerge': head_or_base_repo.get('allow_squash_merge'), 'AllowMergeCommit': head_or_base_repo.get('allow_merge_commit'), 'SucscribersCount': head_or_base_repo.get('subscribers_count') } ec_head_or_base = { 'Label': head_or_base.get('label'), 'Ref': head_or_base.get('ref'), 'SHA': head_or_base.get('sha'), 'User': ec_head_or_base_user, 'Repo': ec_head_repo, } return ec_head_or_base " 1592,"def _incremental_weighted_mean_and_var(X, sample_weight, last_weighted_mean, last_weighted_variance, last_weight_sum): """"""Calculate weighted mean and variance batch update last_weighted_mean and last_weighted_variance are statistics computed at the last step by the function. Both must be initialized to 0.0. In case no scaling is required last_weighted_variance can be None. The weighted_mean is always required and returned because necessary for the calculation of the weighted_variance. last_weight sum is the sum of weights encountered until now. Derived from the paper ""Incremental calculation of weighted mean and variance"", by Tony Finch. 
Parameters ---------- X : array-like, shape (n_samples, n_features) Data to use for statistics update sample_weight : array-like, shape (n_samples,) last_weighted_mean : array-like, shape: (n_features,) last_weighted_variance : array-like, shape: (n_features,) last_weight_sum : array-like, shape (n_features,) Returns ------- updated_weighted_mean : array, shape (n_features,) updated_weighted_variance : array, shape (n_features,) If None, only weighted_mean is computed updated_weight_sum : array, shape (n_features,) Notes ----- NaNs in X are ignored. References ---------- Tony Finch ""Incremental calculation of weighted mean and variance"" University of Cambridge Computing Service, February 2009 """""" # last = stats until now # new = the current increment # updated = the aggregated stats M = np.isnan(X) sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1))) new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel() total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0) X_0 = np.where(np.isnan(X), 0, X) new_weighted_mean = \ _safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0) new_weighted_mean *= total_weight_sum / new_weight_sum updated_weight_sum = last_weight_sum + new_weight_sum updated_weighted_mean = ( (last_weight_sum * last_weighted_mean + new_weight_sum * new_weighted_mean) / updated_weight_sum) if last_weighted_variance is None: updated_weighted_variance = None else: X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2) new_weighted_variance = \ _safe_accumulator_op( np.average, X_0, weights=sample_weight, axis=0) new_weighted_variance *= total_weight_sum / new_weight_sum new_element = ( new_weight_sum * (new_weighted_variance + (new_weighted_mean - updated_weighted_mean) ** 2)) last_element = ( last_weight_sum * (last_weighted_variance + (last_weighted_mean - updated_weighted_mean) ** 2)) updated_weighted_variance = ( new_element + last_element) / updated_weight_sum return updated_weighted_mean, updated_weighted_variance, updated_weight_sum ","def _incremental_weighted_mean_and_var(X, sample_weight, last_weighted_mean, last_weighted_variance, last_weight_sum): """"""Calculate weighted mean and variance batch update last_weighted_mean and last_weighted_variance are statistics computed at the last step by the function. Both must be initialized to 0.0. In case no scaling is required last_weighted_variance can be None. The weighted_mean is always required and returned because necessary for the calculation of the weighted_variance. last_weight sum is the sum of weights encountered until now. Derived from the paper ""Incremental calculation of weighted mean and variance"", by Tony Finch. Parameters ---------- X : array-like of shape (n_samples, n_features) Data to use for statistics update sample_weight : array-like, shape (n_samples,) last_weighted_mean : array-like, shape: (n_features,) last_weighted_variance : array-like, shape: (n_features,) last_weight_sum : array-like, shape (n_features,) Returns ------- updated_weighted_mean : array, shape (n_features,) updated_weighted_variance : array, shape (n_features,) If None, only weighted_mean is computed updated_weight_sum : array, shape (n_features,) Notes ----- NaNs in X are ignored. 
References ---------- Tony Finch ""Incremental calculation of weighted mean and variance"" University of Cambridge Computing Service, February 2009 """""" # last = stats until now # new = the current increment # updated = the aggregated stats M = np.isnan(X) sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1))) new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel() total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0) X_0 = np.where(np.isnan(X), 0, X) new_weighted_mean = \ _safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0) new_weighted_mean *= total_weight_sum / new_weight_sum updated_weight_sum = last_weight_sum + new_weight_sum updated_weighted_mean = ( (last_weight_sum * last_weighted_mean + new_weight_sum * new_weighted_mean) / updated_weight_sum) if last_weighted_variance is None: updated_weighted_variance = None else: X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2) new_weighted_variance = \ _safe_accumulator_op( np.average, X_0, weights=sample_weight, axis=0) new_weighted_variance *= total_weight_sum / new_weight_sum new_element = ( new_weight_sum * (new_weighted_variance + (new_weighted_mean - updated_weighted_mean) ** 2)) last_element = ( last_weight_sum * (last_weighted_variance + (last_weighted_mean - updated_weighted_mean) ** 2)) updated_weighted_variance = ( new_element + last_element) / updated_weight_sum return updated_weighted_mean, updated_weighted_variance, updated_weight_sum " 31241,"def main(): # get current time now = datetime.now() # args list_name = demisto.getArg(""listname"") # update list name to start with 'OOO', so we can't overwrite other lists with this if not list_name.startswith(""OOO""): list_name = f""OOO {list_name}"" # get the current list ooo_list = demisto.executeCommand(""getList"", {""listName"": list_name})[0][""Contents""] # check if the list exists, if not create it: if ""Item not found"" in ooo_list: demisto.results(demisto.executeCommand(""createList"", {""listName"": list_name, ""listData"": []})) ooo_list = demisto.executeCommand(""getList"", {""listName"": list_name})[0][""Contents""] # check status of the list, and add/remove the user from it. if not ooo_list or ooo_list == [] or ooo_list == """": list_data = [] else: list_data = json.loads(ooo_list) # loop the list, removing any where the offuntil is in the past remove = [] for i in list_data: off_until = datetime.strptime(i['offuntil'], ""%Y-%m-%d"") if off_until < now: remove.append(i['user']) # remove the users from the list. list_data = [i for i in list_data if i['user'] not in remove] # set the list, return results demisto.executeCommand(""setList"", {""listName"": list_name, ""listData"": json.dumps(list_data)}) demisto.results(f""Removed Users from Out of Office List {list_name}: {str(remove)}"") ","def main(): # get current time now = datetime.now() # args list_name = demisto.getArg(""listname"") # update list name to start with 'OOO', so we can't overwrite other lists with this if not list_name.startswith(""OOO""): list_name = f""OOO {list_name}"" # get the current list ooo_list = demisto.executeCommand(""getList"", {""listName"": list_name})[0][""Contents""] # check if the list exists, if not create it: if ""Item not found"" in ooo_list: demisto.results(demisto.executeCommand(""createList"", {""listName"": list_name, ""listData"": []})) ooo_list = demisto.executeCommand(""getList"", {""listName"": list_name})[0][""Contents""] # check status of the list, and add/remove the user from it. 
if not ooo_list: list_data = [] else: list_data = json.loads(ooo_list) # loop the list, removing any where the offuntil is in the past remove = [] for i in list_data: off_until = datetime.strptime(i['offuntil'], ""%Y-%m-%d"") if off_until < now: remove.append(i['user']) # remove the users from the list. list_data = [i for i in list_data if i['user'] not in remove] # set the list, return results demisto.executeCommand(""setList"", {""listName"": list_name, ""listData"": json.dumps(list_data)}) demisto.results(f""Removed Users from Out of Office List {list_name}: {str(remove)}"") " 2647,"def test_no_data_conversion_warning(): # No warnings issued if metric is not a boolean distance function rng = np.random.RandomState(0) X = rng.randn(5, 4) with warnings.catch_warnings(): warnings.simplefilter(""error"", UserWarning) pairwise_distances(X, metric=""minkowski"") ","def test_no_data_conversion_warning(): # No warnings issued if metric is not a boolean distance function rng = np.random.RandomState(0) X = rng.randn(5, 4) with warnings.catch_warnings(): warnings.simplefilter(""error"", DataConversionWarning) pairwise_distances(X, metric=""minkowski"") " 30587,"def panorama_show_running_config_command(): target = str(demisto.args()['target']) if 'target' in demisto.args() else None result = panorama_show_running_config(target) return result ","def panorama_show_running_config_command(): target = demisto.args().get('target') result = panorama_show_running_config(target) return result " 23342,"def packages_from(requirements, wheels, skip_packages): """""" Return a list of the entires in requirements that aren't found in wheels. Both assumed to be lists/iterables of strings formatted like ""name==version"". """""" packages = set(requirements) - set(wheels) - set(skip_packages) return [package_name(p) for p in packages] ","def packages_from(requirements, wheels, skip_packages): """""" Return a list of the entries in requirements that aren't found in wheels. Both assumed to be lists/iterables of strings formatted like ""name==version"". """""" packages = set(requirements) - set(wheels) - set(skip_packages) return [package_name(p) for p in packages] " 24593,"def thermal_speed_coefficients(method: str, ndim: int) -> float: r"""""" Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}` based on the given ``method`` and ``ndim``. (See the `~plasmapy.formulary.parameters.thermal_speed` :ref:`Notes ` section for further details.) Parameters ---------- method : `str` Method to be used for calculating the thermal speed. Valid values are ``""most_probable""``, ``""rms""``, ``""mean_magnitude""``, and ``""nrl""``. ndim : `int` Dimensionality (1D, 2D, 3D) of space in which to calculate thermal speed. Valid values are ``1``, ``2``, or ``3``. Raises ------ `ValueError` If ``method`` or ``ndim`` are not a valid value. Notes ----- For a detailed explanation of the different coefficients used to calculate the therml speed, then look to the :ref:`Notes ` section for `~plasmapy.formulary.parameters.thermal_speed`. The possible return values are listed the table .. table:: Thermal speed :math:`v_{th}` coefficients. :widths: 2 1 1 1 1 :width: 100% +--------------+------------+---------------+---------------+---------------+ | ↓ **method** | **ndim** → | ``1`` | ``2`` | ``3`` | +--------------+------------+---------------+---------------+---------------+ | ``""most_probable""`` | .. math:: | .. math:: | .. 
math:: | | | 0 | 1 | \sqrt{2} | +--------------+------------+---------------+---------------+---------------+ | ``""rms""`` | .. math:: | .. math:: | .. math:: | | | 1 | \sqrt{2} | \sqrt{3} | +--------------+------------+---------------+---------------+---------------+ | ``""mean_magnitude""`` | .. math:: | .. math:: | .. math:: | | | \sqrt{2/π} | \sqrt{π/2} | \sqrt{8/π} | +--------------+------------+---------------+---------------+---------------+ | ``""nrl""`` | .. math:: | | | 1 | +--------------+------------+---------------+---------------+---------------+ Examples -------- >>> thermal_speed_coefficients(method=""most_probable"", ndim=3) 1.414213... """""" _coefficients = { (1, ""most_probable""): 0, (2, ""most_probable""): 1, (3, ""most_probable""): np.sqrt(2), (1, ""rms""): 1, (2, ""rms""): np.sqrt(2), (3, ""rms""): np.sqrt(3), (1, ""mean_magnitude""): np.sqrt(2 / np.pi), (2, ""mean_magnitude""): np.sqrt(np.pi / 2), (3, ""mean_magnitude""): np.sqrt(8 / np.pi), (1, ""nrl""): 1, (2, ""nrl""): 1, (3, ""nrl""): 1, } try: coeff = _coefficients[(ndim, method)] except KeyError: raise ValueError( f""Value for (ndim, method) pair not valid, got '({ndim}, {method})'."" ) return coeff ","def thermal_speed_coefficients(method: str, ndim: int) -> float: r"""""" Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}` based on the given ``method`` and ``ndim``. (See the `~plasmapy.formulary.parameters.thermal_speed` :ref:`Notes ` section for further details.) Parameters ---------- method : `str` Method to be used for calculating the thermal speed. Valid values are ``""most_probable""``, ``""rms""``, ``""mean_magnitude""``, and ``""nrl""``. ndim : `int` Dimensionality (1D, 2D, 3D) of space in which to calculate thermal speed. Valid values are ``1``, ``2``, or ``3``. Raises ------ `ValueError` If ``method`` or ``ndim`` are not a valid value. Notes ----- For a detailed explanation of the different coefficients used to calculate the thermal speed, then look to the :ref:`Notes ` section for `~plasmapy.formulary.parameters.thermal_speed`. The possible return values are listed the table .. table:: Thermal speed :math:`v_{th}` coefficients. :widths: 2 1 1 1 1 :width: 100% +--------------+------------+---------------+---------------+---------------+ | ↓ **method** | **ndim** → | ``1`` | ``2`` | ``3`` | +--------------+------------+---------------+---------------+---------------+ | ``""most_probable""`` | .. math:: | .. math:: | .. math:: | | | 0 | 1 | \sqrt{2} | +--------------+------------+---------------+---------------+---------------+ | ``""rms""`` | .. math:: | .. math:: | .. math:: | | | 1 | \sqrt{2} | \sqrt{3} | +--------------+------------+---------------+---------------+---------------+ | ``""mean_magnitude""`` | .. math:: | .. math:: | .. math:: | | | \sqrt{2/π} | \sqrt{π/2} | \sqrt{8/π} | +--------------+------------+---------------+---------------+---------------+ | ``""nrl""`` | .. math:: | | | 1 | +--------------+------------+---------------+---------------+---------------+ Examples -------- >>> thermal_speed_coefficients(method=""most_probable"", ndim=3) 1.414213... 
"""""" _coefficients = { (1, ""most_probable""): 0, (2, ""most_probable""): 1, (3, ""most_probable""): np.sqrt(2), (1, ""rms""): 1, (2, ""rms""): np.sqrt(2), (3, ""rms""): np.sqrt(3), (1, ""mean_magnitude""): np.sqrt(2 / np.pi), (2, ""mean_magnitude""): np.sqrt(np.pi / 2), (3, ""mean_magnitude""): np.sqrt(8 / np.pi), (1, ""nrl""): 1, (2, ""nrl""): 1, (3, ""nrl""): 1, } try: coeff = _coefficients[(ndim, method)] except KeyError: raise ValueError( f""Value for (ndim, method) pair not valid, got '({ndim}, {method})'."" ) return coeff " 43663,"def meanfield( name, geometry, charge=0, mult=1, basis=""sto-3g"", package=""pyscf"", outpath=""."" ): # pylint: disable=too-many-arguments r""""""Generates a file from which the mean field electronic structure of the molecule can be retrieved. This function uses OpenFermion-PySCF and OpenFermion-Psi4 plugins to perform the Hartree-Fock (HF) calculation for the polyatomic system using the quantum chemistry packages ``PySCF`` and ``Psi4``, respectively. The mean field electronic structure is saved in an hdf5-formatted file in the directory ``os.path.join(outpath, package, basis)``. Args: name (str): String used to label the molecule geometry (list): List containing the symbol and Cartesian coordinates for each atom charge (int): Net charge of the system mult (int): Spin multiplicity :math:`\mathrm{mult}=N_\mathrm{unpaired} + 1` for :math:`N_\mathrm{unpaired}` unpaired electrons occupying the HF orbitals. Possible values for ``mult`` are :math:`1, 2, 3, \ldots`. If not specified, a closed-shell HF state is assumed. basis (str): Atomic basis set used to represent the HF orbitals. Basis set availability per element can be found `here `_ package (str): Quantum chemistry package used to solve the Hartree-Fock equations. Either ``'pyscf'`` or ``'psi4'`` can be used. outpath (str): Path to output directory Returns: str: full path to the file containing the mean field electronic structure **Example** >>> name = 'h2' >>> geometry = [['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]] >>> meanfield(name, geometry) ./pyscf/sto-3g/h2 """""" package = package.strip().lower() if package not in (""psi4"", ""pyscf""): error_message = ( ""Integration with quantum chemistry package '{}' is not available. \n Please set"" "" 'package' to 'pyscf' or 'psi4'."".format(package) ) raise TypeError(error_message) package_dir = os.path.join(outpath.strip(), package) basis_dir = os.path.join(package_dir, basis.strip()) if not os.path.isdir(package_dir): os.mkdir(package_dir) os.mkdir(basis_dir) elif not os.path.isdir(basis_dir): os.mkdir(basis_dir) path_to_file = os.path.join(basis_dir, name.strip()) molecule = MolecularData(geometry, basis, mult, charge, filename=path_to_file) if package == ""psi4"": run_psi4(molecule, run_scf=1, verbose=0, tolerate_error=1) if package == ""pyscf"": run_pyscf(molecule, run_scf=1, verbose=0) return path_to_file ","def meanfield( name, geometry, charge=0, mult=1, basis=""sto-3g"", package=""pyscf"", outpath=""."" ): # pylint: disable=too-many-arguments r""""""Generates a file from which the mean field electronic structure of the molecule can be retrieved. This function uses OpenFermion-PySCF and OpenFermion-Psi4 plugins to perform the Hartree-Fock (HF) calculation for the polyatomic system using the quantum chemistry packages ``PySCF`` and ``Psi4``, respectively. The mean field electronic structure is saved in an hdf5-formatted file in the directory ``os.path.join(outpath, package, basis)``. 
Args: name (str): String used to label the molecule geometry (list): List containing the symbol and Cartesian coordinates for each atom charge (int): Net charge of the system mult (int): Spin multiplicity :math:`\mathrm{mult}=N_\mathrm{unpaired} + 1` for :math:`N_\mathrm{unpaired}` unpaired electrons occupying the HF orbitals. Possible values for ``mult`` are :math:`1, 2, 3, \ldots`. If not specified, a closed-shell HF state is assumed. basis (str): Atomic basis set used to represent the HF orbitals. Basis set availability per element can be found `here `_ package (str): Quantum chemistry package used to solve the Hartree-Fock equations. Either ``'pyscf'`` or ``'psi4'`` can be used. outpath (str): Path to output directory Returns: str: absolute path to the file containing the mean field electronic structure **Example** >>> name = 'h2' >>> geometry = [['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]] >>> meanfield(name, geometry) ./pyscf/sto-3g/h2 """""" package = package.strip().lower() if package not in (""psi4"", ""pyscf""): error_message = ( ""Integration with quantum chemistry package '{}' is not available. \n Please set"" "" 'package' to 'pyscf' or 'psi4'."".format(package) ) raise TypeError(error_message) package_dir = os.path.join(outpath.strip(), package) basis_dir = os.path.join(package_dir, basis.strip()) if not os.path.isdir(package_dir): os.mkdir(package_dir) os.mkdir(basis_dir) elif not os.path.isdir(basis_dir): os.mkdir(basis_dir) path_to_file = os.path.join(basis_dir, name.strip()) molecule = MolecularData(geometry, basis, mult, charge, filename=path_to_file) if package == ""psi4"": run_psi4(molecule, run_scf=1, verbose=0, tolerate_error=1) if package == ""pyscf"": run_pyscf(molecule, run_scf=1, verbose=0) return path_to_file " 20223,"def run(single_school=None): """"""Get percentile rankings for schools by degree, control, and state."""""" count = 0 starter = datetime.datetime.now() base_query = build_base_cohorts() if single_school: # a single_school query is allowed to sidestep exclusions base_query = School.objects.filter(pk=single_school) for school in base_query: by_degree = {} by_state = {} by_control = {} count += 1 if count % 500 == 0: # pragma: no cover logger.info(count) # degree_cohort is the default, national base cohort degree_cohort = DEGREE_COHORTS.get(school.degrees_highest) state_cohort = [ s for s in degree_cohort if s and s.state and s.state == school.state ] if not school.control: control_cohort = None elif school.control == 'Public': control_cohort = [ s for s in degree_cohort if s.control == school.control ] else: control_cohort = [ s for s in degree_cohort if s.control != 'Public' ] for metric in ['grad_rate', 'repay_3yr', 'median_total_debt']: if not getattr(school, metric): by_state.update({metric: None}) by_control.update({metric: None}) by_degree.update({metric: None}) else: if state_cohort: by_state.update({ metric: rank_by_metric(school, state_cohort, metric) }) if control_cohort: by_control.update({ metric: rank_by_metric(school, control_cohort, metric) }) if degree_cohort: by_degree.update({ metric: rank_by_metric(school, degree_cohort, metric) }) school.cohort_ranking_by_state = by_state school.cohort_ranking_by_control = by_control school.cohort_ranking_by_highest_degree = by_degree school.save() sys.stdout.write('.') sys.stdout.flush() logger.info(""\nCohort script took {} to process {} schools"".format( datetime.datetime.now() - starter, count )) ","def run(single_school=None): """"""Get percentile rankings for schools by degree, control, 
and state."""""" count = 0 starter = datetime.datetime.now() base_query = build_base_cohorts() if single_school: # a single_school query is allowed to sidestep exclusions base_query = School.objects.filter(pk=single_school) for school in base_query: by_degree = {} by_state = {} by_control = {} count += 1 if count % 500 == 0: # pragma: no cover logger.info(count) # degree_cohort is the default, national base cohort degree_cohort = DEGREE_COHORTS.get(school.degrees_highest, []) state_cohort = [ s for s in degree_cohort if s and s.state and s.state == school.state ] if not school.control: control_cohort = None elif school.control == 'Public': control_cohort = [ s for s in degree_cohort if s.control == school.control ] else: control_cohort = [ s for s in degree_cohort if s.control != 'Public' ] for metric in ['grad_rate', 'repay_3yr', 'median_total_debt']: if not getattr(school, metric): by_state.update({metric: None}) by_control.update({metric: None}) by_degree.update({metric: None}) else: if state_cohort: by_state.update({ metric: rank_by_metric(school, state_cohort, metric) }) if control_cohort: by_control.update({ metric: rank_by_metric(school, control_cohort, metric) }) if degree_cohort: by_degree.update({ metric: rank_by_metric(school, degree_cohort, metric) }) school.cohort_ranking_by_state = by_state school.cohort_ranking_by_control = by_control school.cohort_ranking_by_highest_degree = by_degree school.save() sys.stdout.write('.') sys.stdout.flush() logger.info(""\nCohort script took {} to process {} schools"".format( datetime.datetime.now() - starter, count )) " 3687,"def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): """"""Create a (read-only) record array from binary data contained in a string Parameters ---------- datastring : str Binary data contained in a string dtype : data-type, optional Valid dtype for all arrays shape : int or tuple of ints, optional Shape of each array. offset : int, optional Position in the file to start reading from. formats, names, titles, aligned, byteorder : If `dtype` is ``None``, these arguments are passed to `numpy.format_parser` to construct a dtype. See that function for detailed documentation. Returns ------- np.recarray record array consisting of data in datastring. Examples -------- >>> a = np.empty(10,dtype='f8,i4,a5') >>> a[5] = (0.5,10,'abcde') >>> b=np.core.records.fromstring(a.tostring(), formats='f8,i4,a5', shape=10, ... byteorder='<') >>> print(b[5]) (0.5, 10, 'abcde') >>> b.shape (10,) """""" if dtype is None and formats is None: raise TypeError(""fromstring() needs a 'dtype' or 'formats' argument"") if dtype is not None: descr = sb.dtype(dtype) else: descr = format_parser(formats, names, titles, aligned, byteorder).dtype itemsize = descr.itemsize # NumPy 1.19.0, 2020-01-01 shape = _deprecate_shape_0_as_None(shape) if shape in (None, -1): shape = (len(datastring) - offset) // itemsize _array = recarray(shape, descr, buf=datastring, offset=offset) return _array ","def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): """"""Create a (read-only) record array from binary data contained in a string Parameters ---------- datastring : str Binary data contained in a string dtype : data-type, optional Valid dtype for all arrays shape : int or tuple of ints, optional Shape of each array. offset : int, optional Position in the file to start reading from. 
formats, names, titles, aligned, byteorder : If `dtype` is ``None``, these arguments are passed to `numpy.format_parser` to construct a dtype. See that function for detailed documentation. Returns ------- np.recarray Read-only record array consisting of data in datastring. Examples -------- >>> a = np.empty(10,dtype='f8,i4,a5') >>> a[5] = (0.5,10,'abcde') >>> b=np.core.records.fromstring(a.tostring(), formats='f8,i4,a5', shape=10, ... byteorder='<') >>> print(b[5]) (0.5, 10, 'abcde') >>> b.shape (10,) """""" if dtype is None and formats is None: raise TypeError(""fromstring() needs a 'dtype' or 'formats' argument"") if dtype is not None: descr = sb.dtype(dtype) else: descr = format_parser(formats, names, titles, aligned, byteorder).dtype itemsize = descr.itemsize # NumPy 1.19.0, 2020-01-01 shape = _deprecate_shape_0_as_None(shape) if shape in (None, -1): shape = (len(datastring) - offset) // itemsize _array = recarray(shape, descr, buf=datastring, offset=offset) return _array " 8008,"def test_activity_of_tritium(): """"""Checks that 1g of tritium has the correct activity activity scaling"""""" m1 = openmc.Material() m1.add_nuclide(""H3"", 1) m1.set_density('g/cm3', 1) m1.volume = 1 assert pytest.approx(m1.activity) == 3.559778e14 m1.set_density('g/cm3', 2) assert pytest.approx(m1.activity) == 3.559778e14*2 m1.volume = 3 assert pytest.approx(m1.activity) == 3.559778e14*2*3 ","def test_activity_of_tritium(): """"""Checks that 1g of tritium has the correct activity scaling"""""" m1 = openmc.Material() m1.add_nuclide(""H3"", 1) m1.set_density('g/cm3', 1) m1.volume = 1 assert pytest.approx(m1.activity) == 3.559778e14 m1.set_density('g/cm3', 2) assert pytest.approx(m1.activity) == 3.559778e14*2 m1.volume = 3 assert pytest.approx(m1.activity) == 3.559778e14*2*3 " 39121,"def _append_metadata(json_data): """""" Appends necessary _metadata fields s for downstream processing and event splitting """""" default_metadata = {'@uuid': str(uuid.uuid4()), '@timestamp': datetime.utcnow().isoformat()} if isinstance(json_data, str): json_data = json.loads(json_data) if '_metadata' not in json_data.keys(): json_data['_metadata'] = default_metadata return json.dumps(json_data) ","def _append_metadata(json_data): """""" Appends necessary _metadata fields s for downstream processing and event splitting """""" default_metadata = {'@uuid': str(uuid.uuid4()), '@timestamp': datetime.utcnow().isoformat()} if isinstance(json_data, str): json_data = json.loads(json_data) if '_metadata' not in json_data: json_data['_metadata'] = default_metadata return json.dumps(json_data) " 39118,"def filter_fields(response, fields=None): """"""Filter response fields. Args: response (dict): Response as a dictionary. fields (list): List of fields to filter on. Returns: dict: Filtered response. """""" if fields is None: return response return {k: v for k, v in response.items() if k in fields} ","def filter_fields(response, fields=None): """"""Filter response fields. Args: response (dict): Response as a dictionary. fields (list): List of fields to filter on. Returns: dict: Filtered response. """""" if fields is None: return response return {k: reponse[k] for k in response.keys() & set(fields)} " 40737,"def JaccardIndex(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda: r""""""Calculates the Jaccard Index using :class:`~ignite.metrics.ConfusionMatrix` metric. This is the same like the IoU metric .. 
math:: \text{J}(A, B) = \frac{ \lvert A \cap B \rvert }{ \lvert A \cup B \rvert } Args: cm (ConfusionMatrix): instance of confusion matrix metric ignore_index (int, optional): index to ignore, e.g. background index Returns: MetricsLambda Examples: .. code-block:: python train_evaluator = ... cm = ConfusionMatrix(num_classes=num_classes) JaccardIndex(cm, ignore_index=0).attach(train_evaluator, 'IoU') state = train_evaluator.run(train_dataset) # state.metrics['JaccardIndex'] -> tensor of shape (num_classes - 1, ) """""" return IoU(cm, ignore_index) ","def JaccardIndex(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda: r""""""Calculates the Jaccard Index using :class:`~ignite.metrics.ConfusionMatrix` metric. This is the same like the IoU metric .. math:: \text{J}(A, B) = \frac{ \lvert A \cap B \rvert }{ \lvert A \cup B \rvert } Args: cm (ConfusionMatrix): instance of confusion matrix metric ignore_index (int, optional): index to ignore, e.g. background index Returns: MetricsLambda Examples: .. code-block:: python train_evaluator = ... cm = ConfusionMatrix(num_classes=num_classes) JaccardIndex(cm, ignore_index=0).attach(train_evaluator, 'JaccardIndex') state = train_evaluator.run(train_dataset) # state.metrics['JaccardIndex'] -> tensor of shape (num_classes - 1, ) """""" return IoU(cm, ignore_index) " 50155,"def my_hook(ctx: FunctionDecoratorContext) -> bool: ctx.decoratedFunction.func.is_property = True ctx.decoratedFunction.var.is_property = True return True ","def my_hook(ctx: FunctionDecoratorContext) -> bool: ctx.decorated_function.func.is_property = True ctx.decorated_function.var.is_property = True return True " 17821,"def download(): response = urllib2.urlopen('https://raw.githubusercontent.com/dictation-toolbox/caster/develop/_caster.py') html = response.read() directory = finddirectory() filename = directory + '\\_caster.py' f = open(filename, 'w') f.write(html) ","def download(): response = urllib2.urlopen('https://raw.githubusercontent.com/dictation-toolbox/caster/develop/_caster.py', timeout=10) html = response.read() directory = finddirectory() filename = directory + '\\_caster.py' f = open(filename, 'w') f.write(html) " 11803,"def add(image1, image2, scale=1.0, offset=0): """""" Adds two images, dividing the result by scale and adding the offset. If omitted, scale defaults to 1.0, and offset to 0.0. At least one of the images must be ""1"" mode. .. code-block:: python out = ((image1 + image2) / scale + offset) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_add(image2.im, scale, offset)) ","def add(image1, image2, scale=1.0, offset=0): """""" Adds two images, dividing the result by scale and adding the offset. If omitted, scale defaults to 1.0, and offset to 0.0. At least one of the images must have mode ""1"". .. code-block:: python out = ((image1 + image2) / scale + offset) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_add(image2.im, scale, offset)) " 5810,"def permutation_test(data, statistic, *, permutation_type='independent', vectorized=False, n_resamples=9999, batch=None, alternative=""two-sided"", axis=0, random_state=None): r"""""" Performs a permutation test of a given statistic on provided data. For independent sample statistics, the null hypothesis is that the data are randomly sampled from the same distribution. 
For paired sample statistics, two null hypothesis can be tested: that the data are paired at random or that the data are assigned to samples at random. Parameters ---------- data : iterable of array-like Contains the samples, each of which is an array of observations. Dimensions of sample arrays must be compatible for broadcasting except along `axis`. statistic : callable Statistic for which the p-value of the hypothesis test is to be calculated. `statistic` must be a callable that accepts samples as separate arguments (e.g. ``statistic(*data)``) and returns the resulting statistic. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the sample arrays. permutation_type : {'independent', 'samples', 'pairings'}, optional The type of permutations to be performed, in accordance with the null hypothesis. The first two permutation types are for paired sample statistics, in which all samples contain the same number of observations and observations with corresponding indices along `axis` are considered to be paired; the third is for independent sample statistics. - ``'samples'`` : observations are assigned to different samples but remain paired with the same observations from other samples. This permutation type is appropriate for paired sample hypothesis tests such as the Wilcoxon signed-rank test and the paired t-test. - ``'pairings'`` : observations are paired with different observations, but they remain within the same sample. This permutation type is appropriate for association/correlation tests with statistics such as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's :math:`r`. - ``'independent'`` (default) : observations are assigned to different samples. Samples may contain different numbers of observations. This permutation type is appropriate for independent sample hypothesis tests such as the Mann-Whitney :math:`U` test and the independent sample t-test. Please see the Notes section below for more detailed descriptions of the permutation types. vectorized : bool, default: ``False`` By default, `statistic` is assumed to calculate the statistic only for 1D arrays contained in `data`. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the ND arrays in `data`. Use of a vectorized statistic can reduce computation time. n_resamples : int or np.inf, default: 9999 Number of random permutations (resamples) used to approximate the null distribution. If greater than or equal to the number of distinct permutations, the exact null distribution will be computed. Note that the number of distinct permutations grows very rapidly with the sizes of samples, so exact tests are feasible only for very small data sets. batch : int, optional The number of permutations to process in each call to `statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the total size of all samples, regardless of the value of `vectorized`. Default is ``None``, in which case ``batch`` is the number of permutations. alternative : {'two-sided', 'less', 'greater'}, optional The alternative hypothesis for which the p-value is calculated. For each alternative, the p-value is defined for exact tests as follows. - ``'greater'`` : the percentage of the null distribution that is greater than or equal to the observed value of the test statistic. 
- ``'less'`` : the percentage of the null distribution that is less than or equal to the observed value of the test statistic. - ``'two-sided'`` (default) : twice the smaller of the p-values above. Note that p-values for randomized tests are calculated according to the conservative (over-estimated) approximation suggested in [2]_ and [3]_ rather than the unbiased estimator suggested in [4]_. That is, when calculating the proportion of the randomized null distribution that is as extreme as the observed value of the test statistic, the values in the numerator and denominator are both increased by one. An interpretation of this adjustment is that the observed value of the test statistic is always included as an element of the randomized null distribution. The convention used for two-sided p-values is not universal; the observed test statistic and null distribution are returned in case a different definition is preferred. axis : int, default: 0 The axis of the (broadcasted) samples over which to calculate the statistic. If samples have a different number of dimensions, singleton dimensions are prepended to samples with fewer dimensions before `axis` is considered. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate permutations. If `random_state` is ``None`` (default), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Returns ------- statistic : float or ndarray The observed test statistic of the data. pvalue : float or ndarray The p-value for the given alternative. null_distribution : ndarray The values of the test statistic generated under the null hypothesis. Notes ----- The three types of permutation tests supported by this function are described below. **Unpaired statistics** (``permutation_type='independent'``): The null hypothesis associated with this permutation type is that all observations are sampled from the same underlying distribution and that they have been assigned to one of the samples at random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. When ``1 < n_resamples < binom(n, k)``, where * ``k`` is the number of observations in ``a``, * ``n`` is the total number of observations in ``a`` and ``b``, and * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), the data are pooled (concatenated), randomly assigned to either the first or second sample, and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= binom(n, k)``, an exact test is performed: the data are *partitioned* between the samples in each distinct way exactly once, and the exact null distribution is formed. Note that for a given partitioning of the data between the samples, only one ordering/permutation of the data *within* each sample is considered. For statistics that do not depend on the order of the data within samples, this dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). 
For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. Because only one ordering/permutation of the data *within* each sample is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` and ``y = [a4, a3, b1]`` would *not* be considered distinct from the example above. ``permutation_type='independent'`` does not support one-sample statistics, but it can be applied to statistics with more than two samples. In this case, if ``n`` is an array of the number of observations within each sample, the number of distinct partitions is:: np.product([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) **Paired statistics, permute pairings** (``permutation_type='pairings'``): The null hypothesis associated with this permutation type is that observations within each sample are drawn from the same underlying distribution and that pairings with elements of other samples are assigned at random. Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we wish to consider all possible pairings of elements of ``a`` with elements of a second sample, ``b``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are randomly permuted. The user-supplied statistic accepts one data argument, say ``a_perm``, and calculates the statistic considering ``a_perm`` and ``b``. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= factorial(n)``, an exact test is performed: ``a`` is permuted in each distinct way exactly once. Therefore, the `statistic` is computed for each unique pairing of samples between ``a`` and ``b`` exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left in its original order. ``permutation_type='pairings'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. All samples provided in ``data`` are permuted *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(n)**m Note that if a two-sample statistic, for example, does not inherently depend on the order in which observations are provided - only on the *pairings* of observations - then only one of the two samples should be provided in ``data``. This dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). **Paired statistics, permute samples** (``permutation_type='samples'``): The null hypothesis associated with this permutation type is that observations within each pair are drawn from the same underlying distribution and that the sample to which they are assigned is random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < 2**n``, the elements of ``a`` are ``b`` are randomly swapped between samples (maintaining their pairings) and the statistic is calculated. 
This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= 2**n``, an exact test is performed: the observations are assigned to the two samples in each distinct way (while maintaining pairings) exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``. ``permutation_type='samples'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. If ``data`` contains more than one sample, paired observations within ``data`` are exchanged between samples *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(m)**n Several paired-sample statistical tests, such as the Wilcoxon signed rank test and paired-sample t-test, can be performed considering only the *difference* between two paired elements. Accordingly, if ``data`` contains only one sample, then the null distribution is formed by independently changing the *sign* of each observation. .. warning:: The p-value is calculated by counting the elements of the null distribution that are as extreme or more extreme than the observed value of the statistic. Due to the use of finite precision arithmetic, some statistic functions return numerically distinct values when the theoretical values would be exactly equal. In some cases, this could lead to a large error in the calculated p-value. `permutation_test` guards against this by considering elements in the null distribution that are ""close"" (within a factor of ``1+1e-14``) to the observed value of the test statistic as equal to the observed value of the test statistic. However, the user is advised to inspect the null distribution to assess whether this method of comparison is appropriate, and if not, calculate the p-value manually. See example below. References ---------- .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). .. [2] B. Phipson and G. K. Smyth. ""Permutation P-values Should Never Be Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."" Statistical Applications in Genetics and Molecular Biology 9.1 (2010). .. [3] M. D. Ernst. ""Permutation Methods: A Basis for Exact Inference"". Statistical Science (2004). .. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap (1993). Examples -------- Suppose we wish to test whether two samples are drawn from the same distribution. Assume that the underlying distributions are unknown to us, and that before observing the data, we hypothesized that the mean of the first sample would be less than that of the second sample. We decide that we will use the difference between the sample means as a test statistic, and we will consider a p-value of 0.05 to be statistically significant. For efficiency, we write the function defining the test statistic in a vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the statistic will be calculated for each axis-slice along `axis`. >>> def statistic(x, y, axis): ... return np.mean(x, axis=axis) - np.mean(y, axis=axis) After collecting our data, we calculate the observed value of the test statistic. 
>>> from scipy.stats import norm >>> rng = np.random.default_rng() >>> x = norm.rvs(size=5, random_state=rng) >>> y = norm.rvs(size=6, loc = 3, random_state=rng) >>> statistic(x, y, 0) -3.5411688580987266 Indeed, the test statistic is negative, suggesting that the true mean of the distribution underlying ``x`` is less than that of the distribution underlying ``y``. To determine the probability of this occuring by chance if the two samples were drawn from the same distribution, we perform a permutation test. >>> from scipy.stats import permutation_test >>> # because our statistic is vectorized, we pass `vectorized=True` >>> # `n_resamples=np.inf` indicates that an exact test is to be performed >>> res = permutation_test((x, y), statistic, vectorized=True, ... n_resamples=np.inf, alternative='less') >>> print(res.statistic) -3.5411688580987266 >>> print(res.pvalue) 0.004329004329004329 The probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.4329%. This is less than our chosen threshold of 5%, so we consider this to to be significant evidence against the null hypothesis in favor of the alternative. Because the size of the samples above was small, `permutation_test` could perform an exact test. For larger samples, we resort to a randomized permutation test. >>> x = norm.rvs(size=100, random_state=rng) >>> y = norm.rvs(size=120, loc=0.3, random_state=rng) >>> res = permutation_test((x, y), statistic, n_resamples=100000, ... vectorized=True, alternative='less', ... random_state=rng) >>> print(res.statistic) -0.5230459671240913 >>> print(res.pvalue) 0.00016999830001699983 The approximate probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.0225%. This is again less than our chosen threshold of 5%, so again we have significant evidence to reject the null hypothesis in favor of the alternative. For large samples and number of permutations, the result is comparable to that of the corresponding asymptotic test, the independent sample t-test. >>> from scipy.stats import ttest_ind >>> res_asymptotic = ttest_ind(x, y, alternative='less') >>> print(res_asymptotic.pvalue) 0.00012688101537979522 The permutation distribution of the test statistic is provided for further investigation. >>> import matplotlib.pyplot as plt >>> plt.hist(res.null_distribution, bins=50) >>> plt.title(""Permutation distribution of test statistic"") >>> plt.xlabel(""Value of Statistic"") >>> plt.ylabel(""Frequency"") Inspection of the null distribution is essential if the statistic suffers from inaccuracy due to limited machine precision. Consider the following case: >>> from scipy.stats import pearsonr >>> x = [1, 2, 4, 3] >>> y = [2, 4, 6, 8] >>> def statistic(x, y): ... return pearsonr(x, y).statistic >>> res = permutation_test((x, y), statistic, vectorized=False, ... permutation_type='pairings', ... alternative='greater') >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution In this case, some elements of the null distribution differ from the observed value of the correlation coefficient ``r`` due to numerical noise. We manually inspect the elements of the null distribution that are nearly the same as the observed value of the test statistic. >>> r 0.8 >>> unique = np.unique(null) >>> unique array([-1. , -0.8, -0.8, -0.6, -0.4, -0.2, -0.2, 0. , 0.2, 0.2, 0.4, 0.6, 0.8, 0.8, 1. 
]) >>> unique[np.isclose(r, unique)].tolist() [0.7999999999999999, 0.8] If `permutation_test` were to perform the comparison naively, the elements of the null distribution with value ``0.7999999999999999`` would not be considered as extreme or more extreme as the observed value of the statistic, so the calculated p-value would be too small. >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) >>> incorrect_pvalue 0.1111111111111111 Instead, `permutation_test` treats elements of the null distribution that are within a factor of ``1+1e-14`` of the observed value of the statistic to be equal to the test statistic. >>> correct_pvalue = np.count_nonzero(null >= r / (1+1e-14)) / len(null) >>> correct_pvalue 0.16666666666666666 >>> res.pvalue == correct_pvalue True This method of comparison is expected to be accurate in most practical situations, but the user is advised to assess this by inspecting the elements of the null distribution that are close to the observed value of the statistic. Also, consider the use of statistics that can be calculated using exact arithmetic (e.g. integer statistics). """""" args = _permutation_test_iv(data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) (data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) = args observed = statistic(*data, axis=-1) null_calculators = {""pairings"": _calculate_null_pairings, ""samples"": _calculate_null_samples, ""independent"": _calculate_null_both} null_calculator_args = (data, statistic, n_resamples, batch, random_state) calculate_null = null_calculators[permutation_type] null_distribution, n_resamples, exact_test = ( calculate_null(*null_calculator_args)) # See References [2] and [3] adjustment = 0 if exact_test else 1 # relative tolerance for detecting numerically distinct but # theoretically equal values in the null distribution eps = 1e-14 def less(null_distribution, observed): cmps = null_distribution <= observed * (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def greater(null_distribution, observed): cmps = null_distribution >= observed / (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def two_sided(null_distribution, observed): pvalues_less = less(null_distribution, observed) pvalues_greater = greater(null_distribution, observed) pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 return pvalues compare = {""less"": less, ""greater"": greater, ""two-sided"": two_sided} pvalues = compare[alternative](null_distribution, observed) pvalues = np.clip(pvalues, 0, 1) return PermutationTestResult(observed, pvalues, null_distribution) ","def permutation_test(data, statistic, *, permutation_type='independent', vectorized=False, n_resamples=9999, batch=None, alternative=""two-sided"", axis=0, random_state=None): r"""""" Performs a permutation test of a given statistic on provided data. For independent sample statistics, the null hypothesis is that the data are randomly sampled from the same distribution. For paired sample statistics, two null hypothesis can be tested: that the data are paired at random or that the data are assigned to samples at random. Parameters ---------- data : iterable of array-like Contains the samples, each of which is an array of observations. Dimensions of sample arrays must be compatible for broadcasting except along `axis`. 
statistic : callable Statistic for which the p-value of the hypothesis test is to be calculated. `statistic` must be a callable that accepts samples as separate arguments (e.g. ``statistic(*data)``) and returns the resulting statistic. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the sample arrays. permutation_type : {'independent', 'samples', 'pairings'}, optional The type of permutations to be performed, in accordance with the null hypothesis. The first two permutation types are for paired sample statistics, in which all samples contain the same number of observations and observations with corresponding indices along `axis` are considered to be paired; the third is for independent sample statistics. - ``'samples'`` : observations are assigned to different samples but remain paired with the same observations from other samples. This permutation type is appropriate for paired sample hypothesis tests such as the Wilcoxon signed-rank test and the paired t-test. - ``'pairings'`` : observations are paired with different observations, but they remain within the same sample. This permutation type is appropriate for association/correlation tests with statistics such as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's :math:`r`. - ``'independent'`` (default) : observations are assigned to different samples. Samples may contain different numbers of observations. This permutation type is appropriate for independent sample hypothesis tests such as the Mann-Whitney :math:`U` test and the independent sample t-test. Please see the Notes section below for more detailed descriptions of the permutation types. vectorized : bool, default: ``False`` By default, `statistic` is assumed to calculate the statistic only for 1D arrays contained in `data`. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the ND arrays in `data`. Use of a vectorized statistic can reduce computation time. n_resamples : int or np.inf, default: 9999 Number of random permutations (resamples) used to approximate the null distribution. If greater than or equal to the number of distinct permutations, the exact null distribution will be computed. Note that the number of distinct permutations grows very rapidly with the sizes of samples, so exact tests are feasible only for very small data sets. batch : int, optional The number of permutations to process in each call to `statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the total size of all samples, regardless of the value of `vectorized`. Default is ``None``, in which case ``batch`` is the number of permutations. alternative : {'two-sided', 'less', 'greater'}, optional The alternative hypothesis for which the p-value is calculated. For each alternative, the p-value is defined for exact tests as follows. - ``'greater'`` : the percentage of the null distribution that is greater than or equal to the observed value of the test statistic. - ``'less'`` : the percentage of the null distribution that is less than or equal to the observed value of the test statistic. - ``'two-sided'`` (default) : twice the smaller of the p-values above. Note that p-values for randomized tests are calculated according to the conservative (over-estimated) approximation suggested in [2]_ and [3]_ rather than the unbiased estimator suggested in [4]_. 
That is, when calculating the proportion of the randomized null distribution that is as extreme as the observed value of the test statistic, the values in the numerator and denominator are both increased by one. An interpretation of this adjustment is that the observed value of the test statistic is always included as an element of the randomized null distribution. The convention used for two-sided p-values is not universal; the observed test statistic and null distribution are returned in case a different definition is preferred. axis : int, default: 0 The axis of the (broadcasted) samples over which to calculate the statistic. If samples have a different number of dimensions, singleton dimensions are prepended to samples with fewer dimensions before `axis` is considered. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate permutations. If `random_state` is ``None`` (default), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Returns ------- statistic : float or ndarray The observed test statistic of the data. pvalue : float or ndarray The p-value for the given alternative. null_distribution : ndarray The values of the test statistic generated under the null hypothesis. Notes ----- The three types of permutation tests supported by this function are described below. **Unpaired statistics** (``permutation_type='independent'``): The null hypothesis associated with this permutation type is that all observations are sampled from the same underlying distribution and that they have been assigned to one of the samples at random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. When ``1 < n_resamples < binom(n, k)``, where * ``k`` is the number of observations in ``a``, * ``n`` is the total number of observations in ``a`` and ``b``, and * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), the data are pooled (concatenated), randomly assigned to either the first or second sample, and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= binom(n, k)``, an exact test is performed: the data are *partitioned* between the samples in each distinct way exactly once, and the exact null distribution is formed. Note that for a given partitioning of the data between the samples, only one ordering/permutation of the data *within* each sample is considered. For statistics that do not depend on the order of the data within samples, this dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. Because only one ordering/permutation of the data *within* each sample is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` and ``y = [a4, a3, b1]`` would *not* be considered distinct from the example above. 
``permutation_type='independent'`` does not support one-sample statistics, but it can be applied to statistics with more than two samples. In this case, if ``n`` is an array of the number of observations within each sample, the number of distinct partitions is:: np.product([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) **Paired statistics, permute pairings** (``permutation_type='pairings'``): The null hypothesis associated with this permutation type is that observations within each sample are drawn from the same underlying distribution and that pairings with elements of other samples are assigned at random. Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we wish to consider all possible pairings of elements of ``a`` with elements of a second sample, ``b``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are randomly permuted. The user-supplied statistic accepts one data argument, say ``a_perm``, and calculates the statistic considering ``a_perm`` and ``b``. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= factorial(n)``, an exact test is performed: ``a`` is permuted in each distinct way exactly once. Therefore, the `statistic` is computed for each unique pairing of samples between ``a`` and ``b`` exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left in its original order. ``permutation_type='pairings'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. All samples provided in ``data`` are permuted *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(n)**m Note that if a two-sample statistic, for example, does not inherently depend on the order in which observations are provided - only on the *pairings* of observations - then only one of the two samples should be provided in ``data``. This dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). **Paired statistics, permute samples** (``permutation_type='samples'``): The null hypothesis associated with this permutation type is that observations within each pair are drawn from the same underlying distribution and that the sample to which they are assigned is random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < 2**n``, the elements of ``a`` are ``b`` are randomly swapped between samples (maintaining their pairings) and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= 2**n``, an exact test is performed: the observations are assigned to the two samples in each distinct way (while maintaining pairings) exactly once. 
For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``. ``permutation_type='samples'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. If ``data`` contains more than one sample, paired observations within ``data`` are exchanged between samples *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(m)**n Several paired-sample statistical tests, such as the Wilcoxon signed rank test and paired-sample t-test, can be performed considering only the *difference* between two paired elements. Accordingly, if ``data`` contains only one sample, then the null distribution is formed by independently changing the *sign* of each observation. .. warning:: The p-value is calculated by counting the elements of the null distribution that are as extreme or more extreme than the observed value of the statistic. Due to the use of finite precision arithmetic, some statistic functions return numerically distinct values when the theoretical values would be exactly equal. In some cases, this could lead to a large error in the calculated p-value. `permutation_test` guards against this by considering elements in the null distribution that are ""close"" (within a factor of ``1+1e-14``) to the observed value of the test statistic as equal to the observed value of the test statistic. However, the user is advised to inspect the null distribution to assess whether this method of comparison is appropriate, and if not, calculate the p-value manually. See example below. References ---------- .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). .. [2] B. Phipson and G. K. Smyth. ""Permutation P-values Should Never Be Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."" Statistical Applications in Genetics and Molecular Biology 9.1 (2010). .. [3] M. D. Ernst. ""Permutation Methods: A Basis for Exact Inference"". Statistical Science (2004). .. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap (1993). Examples -------- Suppose we wish to test whether two samples are drawn from the same distribution. Assume that the underlying distributions are unknown to us, and that before observing the data, we hypothesized that the mean of the first sample would be less than that of the second sample. We decide that we will use the difference between the sample means as a test statistic, and we will consider a p-value of 0.05 to be statistically significant. For efficiency, we write the function defining the test statistic in a vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the statistic will be calculated for each axis-slice along `axis`. >>> def statistic(x, y, axis): ... return np.mean(x, axis=axis) - np.mean(y, axis=axis) After collecting our data, we calculate the observed value of the test statistic. >>> from scipy.stats import norm >>> rng = np.random.default_rng() >>> x = norm.rvs(size=5, random_state=rng) >>> y = norm.rvs(size=6, loc = 3, random_state=rng) >>> statistic(x, y, 0) -3.5411688580987266 Indeed, the test statistic is negative, suggesting that the true mean of the distribution underlying ``x`` is less than that of the distribution underlying ``y``. 
To determine the probability of this occuring by chance if the two samples were drawn from the same distribution, we perform a permutation test. >>> from scipy.stats import permutation_test >>> # because our statistic is vectorized, we pass `vectorized=True` >>> # `n_resamples=np.inf` indicates that an exact test is to be performed >>> res = permutation_test((x, y), statistic, vectorized=True, ... n_resamples=np.inf, alternative='less') >>> print(res.statistic) -3.5411688580987266 >>> print(res.pvalue) 0.004329004329004329 The probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.4329%. This is less than our chosen threshold of 5%, so we consider this to to be significant evidence against the null hypothesis in favor of the alternative. Because the size of the samples above was small, `permutation_test` could perform an exact test. For larger samples, we resort to a randomized permutation test. >>> x = norm.rvs(size=100, random_state=rng) >>> y = norm.rvs(size=120, loc=0.3, random_state=rng) >>> res = permutation_test((x, y), statistic, n_resamples=100000, ... vectorized=True, alternative='less', ... random_state=rng) >>> print(res.statistic) -0.5230459671240913 >>> print(res.pvalue) 0.00016999830001699983 The approximate probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.0225%. This is again less than our chosen threshold of 5%, so again we have significant evidence to reject the null hypothesis in favor of the alternative. For large samples and number of permutations, the result is comparable to that of the corresponding asymptotic test, the independent sample t-test. >>> from scipy.stats import ttest_ind >>> res_asymptotic = ttest_ind(x, y, alternative='less') >>> print(res_asymptotic.pvalue) 0.00012688101537979522 The permutation distribution of the test statistic is provided for further investigation. >>> import matplotlib.pyplot as plt >>> plt.hist(res.null_distribution, bins=50) >>> plt.title(""Permutation distribution of test statistic"") >>> plt.xlabel(""Value of Statistic"") >>> plt.ylabel(""Frequency"") Inspection of the null distribution is essential if the statistic suffers from inaccuracy due to limited machine precision. Consider the following case: >>> from scipy.stats import pearsonr >>> x = [1, 2, 4, 3] >>> y = [2, 4, 6, 8] >>> def statistic(x, y): ... return pearsonr(x, y).statistic >>> res = permutation_test((x, y), statistic, vectorized=False, ... permutation_type='pairings', ... alternative='greater') >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution In this case, some elements of the null distribution differ from the observed value of the correlation coefficient ``r`` due to numerical noise. We manually inspect the elements of the null distribution that are nearly the same as the observed value of the test statistic. >>> r 0.8 >>> unique = np.unique(null) >>> unique array([-1. , -0.8, -0.8, -0.6, -0.4, -0.2, -0.2, 0. , 0.2, 0.2, 0.4, 0.6, 0.8, 0.8, 1. ]) # may vary >>> unique[np.isclose(r, unique)].tolist() [0.7999999999999999, 0.8] If `permutation_test` were to perform the comparison naively, the elements of the null distribution with value ``0.7999999999999999`` would not be considered as extreme or more extreme as the observed value of the statistic, so the calculated p-value would be too small. 
>>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) >>> incorrect_pvalue 0.1111111111111111 Instead, `permutation_test` treats elements of the null distribution that are within a factor of ``1+1e-14`` of the observed value of the statistic to be equal to the test statistic. >>> correct_pvalue = np.count_nonzero(null >= r / (1+1e-14)) / len(null) >>> correct_pvalue 0.16666666666666666 >>> res.pvalue == correct_pvalue True This method of comparison is expected to be accurate in most practical situations, but the user is advised to assess this by inspecting the elements of the null distribution that are close to the observed value of the statistic. Also, consider the use of statistics that can be calculated using exact arithmetic (e.g. integer statistics). """""" args = _permutation_test_iv(data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) (data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) = args observed = statistic(*data, axis=-1) null_calculators = {""pairings"": _calculate_null_pairings, ""samples"": _calculate_null_samples, ""independent"": _calculate_null_both} null_calculator_args = (data, statistic, n_resamples, batch, random_state) calculate_null = null_calculators[permutation_type] null_distribution, n_resamples, exact_test = ( calculate_null(*null_calculator_args)) # See References [2] and [3] adjustment = 0 if exact_test else 1 # relative tolerance for detecting numerically distinct but # theoretically equal values in the null distribution eps = 1e-14 def less(null_distribution, observed): cmps = null_distribution <= observed * (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def greater(null_distribution, observed): cmps = null_distribution >= observed / (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def two_sided(null_distribution, observed): pvalues_less = less(null_distribution, observed) pvalues_greater = greater(null_distribution, observed) pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 return pvalues compare = {""less"": less, ""greater"": greater, ""two-sided"": two_sided} pvalues = compare[alternative](null_distribution, observed) pvalues = np.clip(pvalues, 0, 1) return PermutationTestResult(observed, pvalues, null_distribution) " 46,"def normalize_lcc_range(start, end): """""" :param str start: LCC prefix to start range :param str end: LCC prefix to end range :return: range with prefixes being prefixes for sortable LCCs :rtype: [str, str] """""" return [ lcc if lcc == '*' else short_lcc_to_sortable_lcc(lcc) for lcc in (start, end) ] ","def normalize_lcc_range(start, end): """""" :param str start: LCC prefix to start range :param str end: LCC prefix to end range :return: range with prefixes being prefixes for sortable LCCs :rtype: [str, str] """""" return [short_lcc_to_sortable_lcc(lcc) for lcc in (start, end)] " 36091,"def get_formula(symbol_list, mode='hill', separator=''): """""" Return a string with the chemical formula. :param symbol_list: a list of symbols, e.g. ``['H','H','O']`` :param mode: a string to specify how to generate the formula, can assume one of the following values: * 'hill' (default): count the number of atoms of each species, then use Hill notation, i.e. alphabetical order with C and H first if one or several C atom(s) is (are) present, e.g. 
``['C','H','H','H','O','C','H','H','H']`` will return ``'C2H6O'`` ``['S','O','O','H','O','H','O']`` will return ``'H2O4S'`` From E. A. Hill, J. Am. Chem. Soc., 22 (8), pp 478–494 (1900) * 'hill_compact': same as hill but the number of atoms for each species is divided by the greatest common divisor of all of them, e.g. ``['C','H','H','H','O','C','H','H','H','O','O','O']`` will return ``'CH3O2'`` * 'reduce': group repeated symbols e.g. ``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'BaTiO3BaTiO3BaTi2O3'`` * 'group': will try to group as much as possible parts of the formula e.g. ``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'(BaTiO3)2BaTi2O3'`` * 'count': same as hill (i.e. one just counts the number of atoms of each species) without the re-ordering (take the order of the atomic sites), e.g. ``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']`` will return ``'Ba2Ti2O6'`` * 'count_compact': same as count but the number of atoms for each species is divided by the greatest common divisor of all of them, e.g. ``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']`` will return ``'BaTiO3'`` :param separator: a string used to concatenate symbols. Default empty. :return: a string with the formula .. note:: in modes reduce, group, count and count_compact, the initial order in which the atoms were appended by the user is used to group and/or order the symbols in the formula """""" if mode == 'group': return get_formula_group(symbol_list, separator=separator) # for hill and count cases, simply count the occurences of each # chemical symbol (with some re-ordering in hill) if mode in ['hill', 'hill_compact']: symbol_set = set(symbol_list) first_symbols = [] for special_symbol in ['C', 'H']: # remove C and H if present from list and put them at the beginning if special_symbol in symbol_set: symbol_set.remove(special_symbol) first_symbols.append(special_symbol) ordered_symbol_set = first_symbols + list(sorted(symbol_set)) the_symbol_list = [[symbol_list.count(elem), elem] for elem in ordered_symbol_set] elif mode in ['count', 'count_compact']: ordered_symbol_indexes = sorted([symbol_list.index(elem) for elem in set(symbol_list)]) ordered_symbol_set = [symbol_list[i] for i in ordered_symbol_indexes] the_symbol_list = [[symbol_list.count(elem), elem] for elem in ordered_symbol_set] elif mode == 'reduce': the_symbol_list = group_symbols(symbol_list) else: raise ValueError('Mode should be hill, hill_compact, group, reduce, count or count_compact') if mode in ['hill_compact', 'count_compact']: from math import gcd the_gcd = functools.reduce(gcd, [e[0] for e in the_symbol_list]) the_symbol_list = [[e[0] // the_gcd, e[1]] for e in the_symbol_list] return get_formula_from_symbol_list(the_symbol_list, separator=separator) ","def get_formula(symbol_list, mode='hill', separator=''): """""" Return a string with the chemical formula. :param symbol_list: a list of symbols, e.g. ``['H','H','O']`` :param mode: a string to specify how to generate the formula, can assume one of the following values: * 'hill' (default): count the number of atoms of each species, then use Hill notation, i.e. alphabetical order with C and H first if one or several C atom(s) is (are) present, e.g. ``['C','H','H','H','O','C','H','H','H']`` will return ``'C2H6O'`` ``['S','O','O','H','O','H','O']`` will return ``'H2O4S'`` From E. A. Hill, J. Am. Chem. 
Soc., 22 (8), pp 478–494 (1900) * 'hill_compact': same as hill but the number of atoms for each species is divided by the greatest common divisor of all of them, e.g. ``['C','H','H','H','O','C','H','H','H','O','O','O']`` will return ``'CH3O2'`` * 'reduce': group repeated symbols e.g. ``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'BaTiO3BaTiO3BaTi2O3'`` * 'group': will try to group as much as possible parts of the formula e.g. ``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'(BaTiO3)2BaTi2O3'`` * 'count': same as hill (i.e. one just counts the number of atoms of each species) without the re-ordering (take the order of the atomic sites), e.g. ``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']`` will return ``'Ba2Ti2O6'`` * 'count_compact': same as count but the number of atoms for each species is divided by the greatest common divisor of all of them, e.g. ``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']`` will return ``'BaTiO3'`` :param separator: a string used to concatenate symbols. Default empty. :return: a string with the formula .. note:: in modes reduce, group, count and count_compact, the initial order in which the atoms were appended by the user is used to group and/or order the symbols in the formula """""" if mode == 'group': return get_formula_group(symbol_list, separator=separator) # for hill and count cases, simply count the occurences of each # chemical symbol (with some re-ordering in hill) if mode in ['hill', 'hill_compact']: ordered_symbol_set = list(sorted(set(symbol_list), key=lambda s: {'C': '0', 'H': '1'}.get(s, s))) elif mode in ['count', 'count_compact']: ordered_symbol_indexes = sorted([symbol_list.index(elem) for elem in set(symbol_list)]) ordered_symbol_set = [symbol_list[i] for i in ordered_symbol_indexes] the_symbol_list = [[symbol_list.count(elem), elem] for elem in ordered_symbol_set] elif mode == 'reduce': the_symbol_list = group_symbols(symbol_list) else: raise ValueError('Mode should be hill, hill_compact, group, reduce, count or count_compact') if mode in ['hill_compact', 'count_compact']: from math import gcd the_gcd = functools.reduce(gcd, [e[0] for e in the_symbol_list]) the_symbol_list = [[e[0] // the_gcd, e[1]] for e in the_symbol_list] return get_formula_from_symbol_list(the_symbol_list, separator=separator) " 42655,"def trade_from_conversion(trade_a: Dict[str, Any], trade_b: Dict[str, Any]) -> Optional[Trade]: """"""Turn information from a conversion into a trade Mary raise: - UnknownAsset due to Asset instantiation - DeserializationError due to unexpected format of dict entries - KeyError due to dict entires missing an expected entry """""" # Check that the status is complete if trade_a['status'] != 'completed': return None # Trade b will represent the asset we are converting to if trade_b['amount']['amount'].startswith('-'): trade_a, trade_b = trade_b, trade_a timestamp = deserialize_timestamp_from_date(trade_a['updated_at'], 'iso8601', 'coinbase') tx_amount = AssetAmount(abs(deserialize_asset_amount(trade_a['amount']['amount']))) tx_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(trade_b['amount']['amount']) native_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp) amount = tx_amount # The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency rate = Price(native_amount / tx_amount) # Obtain fee 
amount in the native currency using data from both trades amount_after_fee = deserialize_asset_amount(trade_b['native_amount']['amount']) amount_before_fee = deserialize_asset_amount(trade_a['native_amount']['amount']) # amount_after_fee + amount_before_fee is a negative amount and the fee needs to be positive conversion_native_fee_amount = abs(amount_after_fee + amount_before_fee) if ZERO not in (tx_amount, conversion_native_fee_amount, amount_before_fee): # To get the asset in wich the fee is nominated we pay attention to the creation # date of each event. As per hour hypothesis the fee is nominated in the asset # for wich the first transaction part was intialized time_created_a = deserialize_timestamp_from_date( date=trade_a['created_at'], formatstr='iso8601', location='coinbase', ) time_created_b = deserialize_timestamp_from_date( date=trade_b['created_at'], formatstr='iso8601', location='coinbase', ) if time_created_a < time_created_b: # We have the fee amount in the native currency. To get it in the # converted asset we have to get the rate asset_native_rate = tx_amount / abs(amount_before_fee) fee_amount = Fee(conversion_native_fee_amount * asset_native_rate) fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) else: trade_b_amount = abs(deserialize_asset_amount(trade_b['amount']['amount'])) asset_native_rate = trade_b_amount / abs(amount_after_fee) fee_amount = Fee(conversion_native_fee_amount * asset_native_rate) fee_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp) else: fee_amount = Fee(ZERO) fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, # in coinbase you are buying/selling tx_asset for native_asset base_asset=tx_asset, quote_asset=native_asset, trade_type=TradeType.SELL, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(trade_a['trade']['id']), ) ","def trade_from_conversion(trade_a: Dict[str, Any], trade_b: Dict[str, Any]) -> Optional[Trade]: """"""Turn information from a conversion into a trade Mary raise: - UnknownAsset due to Asset instantiation - DeserializationError due to unexpected format of dict entries - KeyError due to dict entires missing an expected entry """""" # Check that the status is complete if trade_a['status'] != 'completed': return None # Trade b will represent the asset we are converting to if trade_b['amount']['amount'].startswith('-'): trade_a, trade_b = trade_b, trade_a timestamp = deserialize_timestamp_from_date(trade_a['updated_at'], 'iso8601', 'coinbase') tx_amount = AssetAmount(abs(deserialize_asset_amount(trade_a['amount']['amount']))) tx_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(trade_b['amount']['amount']) native_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp) amount = tx_amount # The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency rate = Price(native_amount / tx_amount) # Obtain fee amount in the native currency using data from both trades amount_after_fee = deserialize_asset_amount(trade_b['native_amount']['amount']) amount_before_fee = deserialize_asset_amount(trade_a['native_amount']['amount']) # amount_after_fee + amount_before_fee is a negative amount and the fee needs to be positive conversion_native_fee_amount = abs(amount_after_fee + amount_before_fee) if ZERO not in (tx_amount, conversion_native_fee_amount, amount_before_fee): # 
To get the asset in wich the fee is nominated we pay attention to the creation # date of each event. As per hour hypothesis the fee is nominated in the asset # for which the first transaction part was initialized time_created_a = deserialize_timestamp_from_date( date=trade_a['created_at'], formatstr='iso8601', location='coinbase', ) time_created_b = deserialize_timestamp_from_date( date=trade_b['created_at'], formatstr='iso8601', location='coinbase', ) if time_created_a < time_created_b: # We have the fee amount in the native currency. To get it in the # converted asset we have to get the rate asset_native_rate = tx_amount / abs(amount_before_fee) fee_amount = Fee(conversion_native_fee_amount * asset_native_rate) fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) else: trade_b_amount = abs(deserialize_asset_amount(trade_b['amount']['amount'])) asset_native_rate = trade_b_amount / abs(amount_after_fee) fee_amount = Fee(conversion_native_fee_amount * asset_native_rate) fee_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp) else: fee_amount = Fee(ZERO) fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, # in coinbase you are buying/selling tx_asset for native_asset base_asset=tx_asset, quote_asset=native_asset, trade_type=TradeType.SELL, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(trade_a['trade']['id']), ) " 34574,"def run_locally(args: argparse.Namespace): try: # noinspection PyUnresolvedReferences from rasax.community import local # pytype: disable=import-error except ModuleNotFoundError: from rasa.nlu.components import MissingDependencyException raise MissingDependencyException( f""Rasa X does not seem to be installed, but is needed for this CLI command."" f""You can find more information on how to install Rasa X in local mode"" f""in the documentation: "" f""{DOCS_BASE_URL_RASA_X}/installation-and-setup/install/local-mode"" ) args.rasa_x_port = args.rasa_x_port or DEFAULT_RASA_X_PORT args.port = args.port or DEFAULT_RASA_PORT project_path = ""."" _validate_rasa_x_start(args, project_path) rasa_x_token = generate_rasa_x_token() process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token) config_path = _get_config_path(args) telemetry.track_rasa_x_local() # noinspection PyBroadException try: local.main( args, project_path, args.data, token=rasa_x_token, config_path=config_path ) except RasaXTermsError: # User didn't accept the Rasa X terms. pass except Exception: print(traceback.format_exc()) rasa.shared.utils.cli.print_error( ""Sorry, something went wrong (see error above). Make sure to start "" ""Rasa X with valid data and valid domain and config files. 
Please, "" ""also check any warnings that popped up.\nIf you need help fixing "" ""the issue visit our forum: https://forum.rasa.com/."" ) finally: process.terminate() ","def run_locally(args: argparse.Namespace): try: # noinspection PyUnresolvedReferences from rasax.community import local # pytype: disable=import-error except ModuleNotFoundError: from rasa.nlu.components import MissingDependencyException raise MissingDependencyException( f""Rasa X does not seem to be installed, but it is needed for this CLI command."" f""You can find more information on how to install Rasa X in local mode"" f""in the documentation: "" f""{DOCS_BASE_URL_RASA_X}/installation-and-setup/install/local-mode"" ) args.rasa_x_port = args.rasa_x_port or DEFAULT_RASA_X_PORT args.port = args.port or DEFAULT_RASA_PORT project_path = ""."" _validate_rasa_x_start(args, project_path) rasa_x_token = generate_rasa_x_token() process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token) config_path = _get_config_path(args) telemetry.track_rasa_x_local() # noinspection PyBroadException try: local.main( args, project_path, args.data, token=rasa_x_token, config_path=config_path ) except RasaXTermsError: # User didn't accept the Rasa X terms. pass except Exception: print(traceback.format_exc()) rasa.shared.utils.cli.print_error( ""Sorry, something went wrong (see error above). Make sure to start "" ""Rasa X with valid data and valid domain and config files. Please, "" ""also check any warnings that popped up.\nIf you need help fixing "" ""the issue visit our forum: https://forum.rasa.com/."" ) finally: process.terminate() " 52960,"def get_date_from_timestamp(date_string, settings, negative=False): match = RE_SEARCH_TIMESTAMP.search(date_string) if negative: match = RE_SEARCH_NEGATIVE_TIMESTAMP.search(date_string) if match: seconds = int(match.group(1)) millis = int(match.group(2) or 0) micros = int(match.group(3) or 0) date_obj = datetime.fromtimestamp(seconds) date_obj = date_obj.replace(microsecond=millis * 1000 + micros) date_obj = apply_timezone_from_settings(date_obj, settings) return date_obj ","def get_date_from_timestamp(date_string, settings, negative=False): if negative: match = RE_SEARCH_NEGATIVE_TIMESTAMP.search(date_string) else: match = RE_SEARCH_TIMESTAMP.search(date_string) if match: seconds = int(match.group(1)) millis = int(match.group(2) or 0) micros = int(match.group(3) or 0) date_obj = datetime.fromtimestamp(seconds) date_obj = date_obj.replace(microsecond=millis * 1000 + micros) date_obj = apply_timezone_from_settings(date_obj, settings) return date_obj " 53380,"def good_case_issue5102(): """"""Eager binding of cell variable when used as the default value of a keyword-only argument. """""" funs = [] for i in range(5): def func(*, _i=i): print(_i) funs.append(func) def func2(_i=i): print(_i) funs.append(func2) return funs ","def good_case_issue5012(): """"""Eager binding of cell variable when used as the default value of a keyword-only argument. 
https://github.com/PyCQA/pylint/issues/5012"""""" funs = [] for i in range(5): def func(*, _i=i): print(_i) funs.append(func) def func2(_i=i): print(_i) funs.append(func2) return funs " 48977,"def test_get_service_account(capsys): storage_get_service_account.get_service_account() out, _ = capsys.readouterr() assert ""@gs-project-accounts.iam.gserviceaccount.com"" in out ","def test_get_service_account(capsys): storage_get_service_account.get_service_account() out, _ = capsys.readouterr() assert ""@gs-project-accounts.iam.gserviceaccount.com"" in out " 45187,"def compute_chunksize(df, num_splits, min_block_size=4096, axis=None): if axis is not None: min_block_size /= 2 mem_usage = df.memory_usage().sum() if mem_usage <= min_block_size: df = df.copy() df.index = pandas.RangeIndex(len(df.index)) df.columns = pandas.RangeIndex(len(df.columns)) return df.shape[axis if axis is not None else slice(None)] else: def get_default_chunksize(length): return ( length // num_splits if length % num_splits == 0 else length // num_splits + 1 ) mem_usage_chunksize = math.sqrt(mem_usage // min_block_size) if axis == 0 or axis is None: row_chunksize = get_default_chunksize(len(df.index)) row_chunksize = max(row_chunksize, len(df) // int(mem_usage_chunksize)) if axis == 0: return row_chunksize col_chunksize = get_default_chunksize(len(df.columns)) # adjust mem_usage_chunksize for non-perfect square roots to have better # partitioning mem_usage_chunksize = mem_usage_chunksize if mem_usage_chunksize - int( mem_usage_chunksize) == 0 else mem_usage_chunksize + 1 col_chunksize = max(col_chunksize, len(df.columns) // int(mem_usage_chunksize)) if axis == 1: return col_chunksize return row_chunksize, col_chunksize ","def compute_chunksize(df, num_splits, min_block_size=4096, axis=None): if axis is not None: min_block_size /= 2 mem_usage = df.memory_usage().sum() if mem_usage <= min_block_size: df = df.copy() df.index = pandas.RangeIndex(len(df.index)) df.columns = pandas.RangeIndex(len(df.columns)) return df.shape[axis if axis is not None else slice(None)] else: def get_default_chunksize(length): return ( length // num_splits if length % num_splits == 0 else length // num_splits + 1 ) mem_usage_chunksize = np.sqrt(mem_usage // min_block_size) if axis == 0 or axis is None: row_chunksize = get_default_chunksize(len(df.index)) row_chunksize = max(row_chunksize, len(df) // int(mem_usage_chunksize)) if axis == 0: return row_chunksize col_chunksize = get_default_chunksize(len(df.columns)) # adjust mem_usage_chunksize for non-perfect square roots to have better # partitioning mem_usage_chunksize = mem_usage_chunksize if mem_usage_chunksize - int( mem_usage_chunksize) == 0 else mem_usage_chunksize + 1 col_chunksize = max(col_chunksize, len(df.columns) // int(mem_usage_chunksize)) if axis == 1: return col_chunksize return row_chunksize, col_chunksize " 35161,"def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None, atol=None, callback_type=None): """"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``. Args: A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex matrix of the linear system with shape ``(n, n)``. b (cupy.ndarray): Right hand side of the linear system with shape ``(n,)`` or ``(n, 1)``. x0 (cupy.ndarray): Starting guess for the solution. tol (float): Tolerance for convergence. restart (int): Number of iterations between restarts. Larger values increase iteration cost, but may be necessary for convergence. maxiter (int): Maximum number of iterations. 
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for ``A``. The preconditioner should approximate the inverse of ``A``. callback (function): User-specified function to call on every restart. It is called as ``callback(arg)``, where ``arg`` is selected by ``callback_type``. callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution vector is used as an argument of callback function. if `pr_norm`, relative (preconditioned) residual norm is used as an arugment. atol (float): Tolerance for convergence. Returns: tuple: It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is the converged solution and ``info`` provides convergence information. Reference: M. Wang, H. Klie, M. Parashar and H. Sudan, ""Solving Sparse Linear Systems on NVIDIA Tesla GPUs"", ICCS 2009 (2009). .. seealso:: :func:`scipy.sparse.linalg.gmres` """""" if A.ndim != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape: {})'.format(A.shape)) if A.dtype.char not in 'fdFD': raise TypeError('unsupprted dtype (actual: {})'.format(A.dtype)) n = A.shape[0] if not (b.shape == (n,) or b.shape == (n, 1)): raise ValueError('b has incompatible dimensins') b = b.astype(A.dtype).ravel() if n == 0: return cupy.empty_like(b), 0 b_norm = cupy.linalg.norm(b) if b_norm == 0: return b, 0 if atol is None: atol = tol * float(b_norm) else: atol = max(float(atol), tol * float(b_norm)) if x0 is None: x = cupy.zeros((n,), dtype=A.dtype) else: if not (x0.shape == (n,) or x0.shape == (n, 1)): raise ValueError('x0 has incompatible dimensins') x = x0.astype(A.dtype).ravel() if maxiter is None: maxiter = n * 10 if restart is None: restart = 20 restart = min(restart, n) if callback_type is None: callback_type = 'pr_norm' if callback_type not in ('x', 'pr_norm'): raise ValueError('Unknow callback_type: {}'.format(callback_type)) if callback is None: callback_type = None V = cupy.empty((n, restart), dtype=A.dtype, order='F') H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F') e = numpy.zeros((restart+1,), dtype=A.dtype) matvec, psolve = _make_funcs(A, M) compute_hu = _make_compute_hu(V) iters = 0 while True: mx = psolve(x) r = b - matvec(mx) r_norm = cublas.nrm2(r) if callback_type == 'x': callback(mx) elif callback_type == 'pr_norm' and iters > 0: callback(r_norm / b_norm) if r_norm <= atol or iters >= maxiter: break v = r / r_norm V[:, 0] = v e[0] = r_norm # Arnoldi iteration for j in range(restart): z = psolve(v) u = matvec(z) H[:j+1, j], u = compute_hu(u, j) cublas.nrm2(u, out=H[j+1, j]) if j+1 < restart: v = u / H[j+1, j] V[:, j+1] = v # Note: The least-square solution to equation Hy = e is computed on CPU # because it is faster if tha matrix size is small. ret = scipy.linalg.lstsq(cupy.asnumpy(H), e) y = cupy.array(ret[0]) x += V @ y iters += restart info = 0 if iters == maxiter and not (r_norm <= atol): info = iters return mx, info ","def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None, atol=None, callback_type=None): """"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``. Args: A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex matrix of the linear system with shape ``(n, n)``. b (cupy.ndarray): Right hand side of the linear system with shape ``(n,)`` or ``(n, 1)``. x0 (cupy.ndarray): Starting guess for the solution. tol (float): Tolerance for convergence. restart (int): Number of iterations between restarts. Larger values increase iteration cost, but may be necessary for convergence. 
maxiter (int): Maximum number of iterations. M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for ``A``. The preconditioner should approximate the inverse of ``A``. callback (function): User-specified function to call on every restart. It is called as ``callback(arg)``, where ``arg`` is selected by ``callback_type``. callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution vector is used as an argument of callback function. if `pr_norm`, relative (preconditioned) residual norm is used as an arugment. atol (float): Tolerance for convergence. Returns: tuple: It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is the converged solution and ``info`` provides convergence information. Reference: M. Wang, H. Klie, M. Parashar and H. Sudan, ""Solving Sparse Linear Systems on NVIDIA Tesla GPUs"", ICCS 2009 (2009). .. seealso:: :func:`scipy.sparse.linalg.gmres` """""" if A.ndim != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape: {})'.format(A.shape)) if A.dtype.char not in 'fdFD': raise TypeError('unsupprted dtype (actual: {})'.format(A.dtype)) n = A.shape[0] if not (b.shape == (n,) or b.shape == (n, 1)): raise ValueError('b has incompatible dimensins') b = b.astype(A.dtype).ravel() if n == 0: return cupy.empty_like(b), 0 b_norm = cupy.linalg.norm(b) if b_norm == 0: return b, 0 if atol is None: atol = tol * float(b_norm) else: atol = max(float(atol), tol * float(b_norm)) if x0 is None: x = cupy.zeros((n,), dtype=A.dtype) else: if not (x0.shape == (n,) or x0.shape == (n, 1)): raise ValueError('x0 has incompatible dimensins') x = x0.astype(A.dtype).ravel() if maxiter is None: maxiter = n * 10 if restart is None: restart = 20 restart = min(restart, n) if callback_type is None: callback_type = 'pr_norm' if callback_type not in ('x', 'pr_norm'): raise ValueError('Unknown callback_type: {}'.format(callback_type)) if callback is None: callback_type = None V = cupy.empty((n, restart), dtype=A.dtype, order='F') H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F') e = numpy.zeros((restart+1,), dtype=A.dtype) matvec, psolve = _make_funcs(A, M) compute_hu = _make_compute_hu(V) iters = 0 while True: mx = psolve(x) r = b - matvec(mx) r_norm = cublas.nrm2(r) if callback_type == 'x': callback(mx) elif callback_type == 'pr_norm' and iters > 0: callback(r_norm / b_norm) if r_norm <= atol or iters >= maxiter: break v = r / r_norm V[:, 0] = v e[0] = r_norm # Arnoldi iteration for j in range(restart): z = psolve(v) u = matvec(z) H[:j+1, j], u = compute_hu(u, j) cublas.nrm2(u, out=H[j+1, j]) if j+1 < restart: v = u / H[j+1, j] V[:, j+1] = v # Note: The least-square solution to equation Hy = e is computed on CPU # because it is faster if tha matrix size is small. 
ret = scipy.linalg.lstsq(cupy.asnumpy(H), e) y = cupy.array(ret[0]) x += V @ y iters += restart info = 0 if iters == maxiter and not (r_norm <= atol): info = iters return mx, info " 3322,"def handle_react_python_scenario(react_project: Project, python_project: Project, quick=False): """""" Handles all data population for the React + Python scenario """""" generate_releases([react_project, python_project], quick=quick) generate_alerts(python_project) generate_saved_query(react_project, ""/productstore"", ""Product Store"") with sentry_sdk.start_span(op=""handle_react_python_scenario.populate_sessions""): populate_sessions(react_project, ""sessions/react_unhandled_exception.json"", quick=quick) populate_sessions(python_project, ""sessions/python_unhandled_exception.json"", quick=quick) with sentry_sdk.start_span(op=""handle_react_python_scenario.populate_connected_events""): populate_connected_event_scenario_1(react_project, python_project, quick=quick) populate_connected_event_scenario_1b(react_project, python_project, quick=quick) populate_connected_event_scenario_2(react_project, python_project, quick=quick) populate_connected_event_scenario_3(python_project, quick=quick) ","def handle_react_python_scenario(react_project: Project, python_project: Project, quick=False): """""" Handles all data population for the React + Python scenario """""" generate_releases([react_project, python_project], quick=quick) generate_alerts(python_project) generate_saved_query(react_project, ""/productstore"", ""Product Store"") with sentry_sdk.start_span(op=""handle_react_python_scenario"", description=""populate_sessions""): populate_sessions(react_project, ""sessions/react_unhandled_exception.json"", quick=quick) populate_sessions(python_project, ""sessions/python_unhandled_exception.json"", quick=quick) with sentry_sdk.start_span(op=""handle_react_python_scenario.populate_connected_events""): populate_connected_event_scenario_1(react_project, python_project, quick=quick) populate_connected_event_scenario_1b(react_project, python_project, quick=quick) populate_connected_event_scenario_2(react_project, python_project, quick=quick) populate_connected_event_scenario_3(python_project, quick=quick) " 37954,"def fmt_docstring(module_func): r"""""" Decorator to insert common text into module docstrings. Should be the last decorator (at the top). Use any of these placeholders in your docstring to have them substituted: * ``{aliases}``: Insert a section listing the parameter aliases defined by decorator ``use_alias``. The following are places for common parameter descriptions: * ``{R}``: region (bounding box as west, east, south, north) * ``{J}``: projection (coordinate system to use) * ``{B}``: frame (map frame and axes parameters) * ``{U}``: timestamp (insert time stamp logo) * ``{CPT}``: cmap (the color palette table) * ``{G}``: color * ``{W}``: pen * ``{n}``: interpolation Parameters ---------- module_func : function The module function. Returns ------- module_func The same *module_func* but with the docstring formatted. Examples -------- >>> @fmt_docstring ... @use_alias(R=""region"", J=""projection"") ... def gmtinfo(**kwargs): ... ''' ... My nice module. ... ... Parameters ... ---------- ... {R} ... {J} ... ... {aliases} ... ''' ... pass >>> print(gmtinfo.__doc__) My nice module. Parameters ---------- region : str or list *Required if this is the first plot command*. *xmin/xmax/ymin/ymax*\ [**+r**][**+u**\ *unit*]. Specify the region of interest. Select map :doc:`region `. 
projection : str *Required if this is the first plot command*. *projcode*\[*projparams*/]\ *width*. Select map :doc:`projection `. frame : bool or str or list Set map boundary frame and axes attributes. Select map :doc:`frame `. **Aliases:** - J = projection - R = region """""" filler_text = {} if hasattr(module_func, ""aliases""): aliases = [""**Aliases:**\n""] for arg in sorted(module_func.aliases): alias = module_func.aliases[arg] aliases.append(""- {} = {}"".format(arg, alias)) filler_text[""aliases""] = ""\n"".join(aliases) for marker, text in COMMON_OPTIONS.items(): # Remove the indentation and the first line break from the multiline # strings so that it doesn't mess up the original docstring filler_text[marker] = textwrap.dedent(text.lstrip(""\n"")) # Dedent the docstring to make it all match the option text. docstring = textwrap.dedent(module_func.__doc__) module_func.__doc__ = docstring.format(**filler_text) return module_func ","def fmt_docstring(module_func): r"""""" Decorator to insert common text into module docstrings. Should be the last decorator (at the top). Use any of these placeholders in your docstring to have them substituted: * ``{aliases}``: Insert a section listing the parameter aliases defined by decorator ``use_alias``. The following are places for common parameter descriptions: * ``{R}``: region (bounding box as west, east, south, north) * ``{J}``: projection (coordinate system to use) * ``{B}``: frame (map frame and axes parameters) * ``{U}``: timestamp (insert time stamp logo) * ``{CPT}``: cmap (the color palette table) * ``{G}``: color * ``{W}``: pen * ``{n}``: interpolation Parameters ---------- module_func : function The module function. Returns ------- module_func The same *module_func* but with the docstring formatted. Examples -------- >>> @fmt_docstring ... @use_alias(R=""region"", J=""projection"") ... def gmtinfo(**kwargs): ... ''' ... My nice module. ... ... Parameters ... ---------- ... {R} ... {J} ... ... {aliases} ... ''' ... pass >>> print(gmtinfo.__doc__) My nice module. Parameters ---------- region : str or list *Required if this is the first plot command*. *xmin/xmax/ymin/ymax*\ [**+r**][**+u**\ *unit*]. Specify the :doc:`region ` of interest. projection : str *Required if this is the first plot command*. *projcode*\[*projparams*/]\ *width*. Select map :doc:`projection `. frame : bool or str or list Set map boundary frame and axes attributes. Select map :doc:`frame `. **Aliases:** - J = projection - R = region """""" filler_text = {} if hasattr(module_func, ""aliases""): aliases = [""**Aliases:**\n""] for arg in sorted(module_func.aliases): alias = module_func.aliases[arg] aliases.append(""- {} = {}"".format(arg, alias)) filler_text[""aliases""] = ""\n"".join(aliases) for marker, text in COMMON_OPTIONS.items(): # Remove the indentation and the first line break from the multiline # strings so that it doesn't mess up the original docstring filler_text[marker] = textwrap.dedent(text.lstrip(""\n"")) # Dedent the docstring to make it all match the option text. docstring = textwrap.dedent(module_func.__doc__) module_func.__doc__ = docstring.format(**filler_text) return module_func " 50557,"def _read_file(filename, bbox=None, mask=None, rows=None, chunksize=None, **kwargs): """""" Returns a GeoDataFrame from a file or URL. .. 
versionadded:: 0.7.0 mask, rows Parameters ---------- filename : str, path object or file-like object Either the absolute or relative path to the file or URL to be opened, or any object with a read() method (such as an open file or StringIO) bbox : tuple | GeoDataFrame or GeoSeries | shapely Geometry, default None Filter features by given bounding box, GeoSeries, GeoDataFrame or a shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Tuple is (minx, miny, maxx, maxy) to match the bounds property of shapely geometry objects. Cannot be used with mask. mask : dict | GeoDataFrame or GeoSeries | shapely Geometry, default None Filter for features that intersect with the given dict-like geojson geometry, GeoSeries, GeoDataFrame or shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with bbox. rows : int or slice, default None Load in specific rows by passing an integer (first `n` rows) or a slice() object. chunksize : int, default None Return an iterator yielding GeoDataFrames of up to `n` rows at a time. **kwargs : Keyword args to be passed to the `open` or `BytesCollection` method in the fiona library when opening the file. For more information on possible keywords, type: ``import fiona; help(fiona.open)`` Examples -------- >>> df = geopandas.read_file(""nybb.shp"") # doctest: +SKIP Specifying layer of GPKG: >>> df = geopandas.read_file(""file.gpkg"", layer='cities') # doctest: +SKIP Reading only first 10 rows: >>> df = geopandas.read_file(""nybb.shp"", rows=10) # doctest: +SKIP Reading only geometries intersecting ``mask``: >>> df = geopandas.read_file(""nybb.shp"", mask=polygon) # doctest: +SKIP Reading only geometries intersecting ``bbox``: >>> df = geopandas.read_file(""nybb.shp"", bbox=(0, 10, 0, 20)) # doctest: +SKIP Returns ------- :obj:`geopandas.GeoDataFrame` or :obj:`pandas.DataFrame` : If `ignore_geometry=True` a :obj:`pandas.DataFrame` will be returned. Notes ----- The format drivers will attempt to detect the encoding of your data, but may fail. In this case, the proper encoding can be specified explicitly by using the encoding keyword parameter, e.g. ``encoding='utf-8'``. """""" _check_fiona(""'read_file' function"") if _is_url(filename): req = _urlopen(filename) path_or_bytes = req.read() reader = fiona.BytesCollection elif pd.api.types.is_file_like(filename): data = filename.read() path_or_bytes = data.encode(""utf-8"") if isinstance(data, str) else data reader = fiona.BytesCollection else: # Opening a file via URL or file-like-object above automatically detects a # zipped file. In order to match that behavior, attempt to add a zip scheme # if missing. if _is_zip(str(filename)): parsed = fiona.parse_path(str(filename)) if isinstance(parsed, fiona.path.ParsedPath): # If fiona is able to parse the path, we can safely look at the scheme # and update it to have a zip scheme if necessary. schemes = (parsed.scheme or """").split(""+"") if ""zip"" not in schemes: parsed.scheme = ""+"".join([""zip""] + schemes) filename = parsed.name elif isinstance(parsed, fiona.path.UnparsedPath) and not str( filename ).startswith(""/vsi""): # If fiona is unable to parse the path, it might have a Windows drive # scheme. Try adding zip:// to the front. If the path starts with ""/vsi"" # it is a legacy GDAL path type, so let it pass unmodified. 
filename = ""zip://"" + parsed.name path_or_bytes = filename reader = fiona.open with fiona_env(): with reader(path_or_bytes, **kwargs) as features: # In a future Fiona release the crs attribute of features will # no longer be a dict, but will behave like a dict. So this should # be forwards compatible crs = ( features.crs[""init""] if features.crs and ""init"" in features.crs else features.crs_wkt ) # handle loading the bounding box if bbox is not None: if isinstance(bbox, (GeoDataFrame, GeoSeries)): bbox = tuple(bbox.to_crs(crs).total_bounds) elif isinstance(bbox, BaseGeometry): bbox = bbox.bounds assert len(bbox) == 4 # handle loading the mask elif isinstance(mask, (GeoDataFrame, GeoSeries)): mask = mapping(mask.to_crs(crs).unary_union) elif isinstance(mask, BaseGeometry): mask = mapping(mask) # restrict load to specific rows if rows is not None: if isinstance(rows, int): rows = slice(rows) elif not isinstance(rows, slice): raise TypeError(""'rows' must be an integer or a slice."") # setup the data loading filter if chunksize: # simple case, load all rows if rows is None: chunk_filters = ( features.filter( n * chunksize, ((n + 1) * chunksize), None, bbox=bbox, mask=mask ) for n in range(0, math.ceil(len(features) / chunksize)) ) # complex case, chunks must remain within specified rows, # and potentially account for a step other than 1 else: start = rows.start or 0 stop = rows.stop or len(features) step = rows.step or 1 chunk_filters = ( features.filter( start + (n * chunksize * step), min(start + ((n + 1) * chunksize * step), stop), rows.step, bbox=bbox, mask=mask, ) for n in range(0, math.ceil((stop - start) / step / chunksize)) ) elif rows is not None: f_filt = features.filter( rows.start, rows.stop, rows.step, bbox=bbox, mask=mask ) elif any((bbox, mask)): f_filt = features.filter(bbox=bbox, mask=mask) else: f_filt = features # get list of columns columns = list(features.schema[""properties""]) if kwargs.get(""ignore_geometry"", False): if chunksize: return ( pd.DataFrame( [record[""properties""] for record in f_filt], columns=columns ) for f_filt in chunk_filters ) return pd.DataFrame( [record[""properties""] for record in f_filt], columns=columns ) if chunksize: return ( GeoDataFrame.from_features( f_filt, crs=crs, columns=columns + [""geometry""] ) for f_filt in chunk_filters ) return GeoDataFrame.from_features( f_filt, crs=crs, columns=columns + [""geometry""] ) ","def _read_file(filename, bbox=None, mask=None, rows=None, chunksize=None, **kwargs): """""" Returns a GeoDataFrame from a file or URL. .. versionadded:: 0.7.0 mask, rows Parameters ---------- filename : str, path object or file-like object Either the absolute or relative path to the file or URL to be opened, or any object with a read() method (such as an open file or StringIO) bbox : tuple | GeoDataFrame or GeoSeries | shapely Geometry, default None Filter features by given bounding box, GeoSeries, GeoDataFrame or a shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Tuple is (minx, miny, maxx, maxy) to match the bounds property of shapely geometry objects. Cannot be used with mask. mask : dict | GeoDataFrame or GeoSeries | shapely Geometry, default None Filter for features that intersect with the given dict-like geojson geometry, GeoSeries, GeoDataFrame or shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with bbox. rows : int or slice, default None Load in specific rows by passing an integer (first `n` rows) or a slice() object. 
chunksize : int, default None Return an iterator yielding GeoDataFrames of up to `n` rows at a time. **kwargs : Keyword args to be passed to the `open` or `BytesCollection` method in the fiona library when opening the file. For more information on possible keywords, type: ``import fiona; help(fiona.open)`` Examples -------- >>> df = geopandas.read_file(""nybb.shp"") # doctest: +SKIP Specifying layer of GPKG: >>> df = geopandas.read_file(""file.gpkg"", layer='cities') # doctest: +SKIP Reading only first 10 rows: >>> df = geopandas.read_file(""nybb.shp"", rows=10) # doctest: +SKIP Reading only geometries intersecting ``mask``: >>> df = geopandas.read_file(""nybb.shp"", mask=polygon) # doctest: +SKIP Reading only geometries intersecting ``bbox``: >>> df = geopandas.read_file(""nybb.shp"", bbox=(0, 10, 0, 20)) # doctest: +SKIP Returns ------- :obj:`geopandas.GeoDataFrame` or :obj:`pandas.DataFrame` : If `ignore_geometry=True` a :obj:`pandas.DataFrame` will be returned. Notes ----- The format drivers will attempt to detect the encoding of your data, but may fail. In this case, the proper encoding can be specified explicitly by using the encoding keyword parameter, e.g. ``encoding='utf-8'``. """""" _check_fiona(""'read_file' function"") if _is_url(filename): req = _urlopen(filename) path_or_bytes = req.read() reader = fiona.BytesCollection elif pd.api.types.is_file_like(filename): data = filename.read() path_or_bytes = data.encode(""utf-8"") if isinstance(data, str) else data reader = fiona.BytesCollection else: # Opening a file via URL or file-like-object above automatically detects a # zipped file. In order to match that behavior, attempt to add a zip scheme # if missing. if _is_zip(str(filename)): parsed = fiona.parse_path(str(filename)) if isinstance(parsed, fiona.path.ParsedPath): # If fiona is able to parse the path, we can safely look at the scheme # and update it to have a zip scheme if necessary. schemes = (parsed.scheme or """").split(""+"") if ""zip"" not in schemes: parsed.scheme = ""+"".join([""zip""] + schemes) filename = parsed.name elif isinstance(parsed, fiona.path.UnparsedPath) and not str( filename ).startswith(""/vsi""): # If fiona is unable to parse the path, it might have a Windows drive # scheme. Try adding zip:// to the front. If the path starts with ""/vsi"" # it is a legacy GDAL path type, so let it pass unmodified. filename = ""zip://"" + parsed.name path_or_bytes = filename reader = fiona.open with fiona_env(): with reader(path_or_bytes, **kwargs) as features: # In a future Fiona release the crs attribute of features will # no longer be a dict, but will behave like a dict. 
So this should # be forwards compatible crs = ( features.crs[""init""] if features.crs and ""init"" in features.crs else features.crs_wkt ) # handle loading the bounding box if bbox is not None: if isinstance(bbox, (GeoDataFrame, GeoSeries)): bbox = tuple(bbox.to_crs(crs).total_bounds) elif isinstance(bbox, BaseGeometry): bbox = bbox.bounds assert len(bbox) == 4 # handle loading the mask elif isinstance(mask, (GeoDataFrame, GeoSeries)): mask = mapping(mask.to_crs(crs).unary_union) elif isinstance(mask, BaseGeometry): mask = mapping(mask) # restrict load to specific rows if rows is not None: if isinstance(rows, int): rows = slice(rows) elif not isinstance(rows, slice): raise TypeError(""'rows' must be an integer or a slice."") # setup the data loading filter if chunksize: # simple case, load all rows if rows is None: chunk_filters = ( features.filter( n * chunksize, ((n + 1) * chunksize), bbox=bbox, mask=mask ) for n in range(0, math.ceil(len(features) / chunksize)) ) # complex case, chunks must remain within specified rows, # and potentially account for a step other than 1 else: start = rows.start or 0 stop = rows.stop or len(features) step = rows.step or 1 chunk_filters = ( features.filter( start + (n * chunksize * step), min(start + ((n + 1) * chunksize * step), stop), rows.step, bbox=bbox, mask=mask, ) for n in range(0, math.ceil((stop - start) / step / chunksize)) ) elif rows is not None: f_filt = features.filter( rows.start, rows.stop, rows.step, bbox=bbox, mask=mask ) elif any((bbox, mask)): f_filt = features.filter(bbox=bbox, mask=mask) else: f_filt = features # get list of columns columns = list(features.schema[""properties""]) if kwargs.get(""ignore_geometry"", False): if chunksize: return ( pd.DataFrame( [record[""properties""] for record in f_filt], columns=columns ) for f_filt in chunk_filters ) return pd.DataFrame( [record[""properties""] for record in f_filt], columns=columns ) if chunksize: return ( GeoDataFrame.from_features( f_filt, crs=crs, columns=columns + [""geometry""] ) for f_filt in chunk_filters ) return GeoDataFrame.from_features( f_filt, crs=crs, columns=columns + [""geometry""] ) " 5331,"def check_render_pipe_str(pipestr, renderers, blacklist, whitelist): ''' Check that all renderers specified in the pipe string are available. If so, return the list of render functions in the pipe as (render_func, arg_str) tuples; otherwise return []. ''' if pipestr is None: return [] parts = [r.strip() for r in pipestr.split('|')] # Note: currently, | is not allowed anywhere in the shebang line except # as pipes between renderers. results = [] try: if parts[0] == pipestr and pipestr in OLD_STYLE_RENDERERS: parts = OLD_STYLE_RENDERERS[pipestr].split('|') if len(parts) > 1 and 'py' in parts[1:]: log.warning( 'The ""py"" renderer ignores previously processed results. ' 'The ""#!%s"" pipeline would be equivalent to just ""#!%s""', pipestr, '|'.join(parts[parts.index('py'):]) ) for part in parts: name, argline = (part + ' ').split(' ', 1) if whitelist and name not in whitelist or \ blacklist and name in blacklist: log.warning( 'The renderer ""%s"" is disallowed by configuration and ' 'will be skipped.', name ) continue results.append((renderers[name], argline.strip())) return results except KeyError: log.error('The renderer ""%s"" is not available', pipestr) return [] ","def check_render_pipe_str(pipestr, renderers, blacklist, whitelist): ''' Check that all renderers specified in the pipe string are available. 
If so, return the list of render functions in the pipe as (render_func, arg_str) tuples; otherwise return []. ''' if pipestr is None: return [] parts = [r.strip() for r in pipestr.split('|')] # Note: currently, | is not allowed anywhere in the shebang line except # as pipes between renderers. results = [] try: if parts[0] == pipestr and pipestr in OLD_STYLE_RENDERERS: parts = OLD_STYLE_RENDERERS[pipestr].split('|') if 'py' in parts[1:]: log.warning( 'The ""py"" renderer ignores previously processed results. ' 'The ""#!%s"" pipeline would be equivalent to just ""#!%s""', pipestr, '|'.join(parts[parts.index('py'):]) ) for part in parts: name, argline = (part + ' ').split(' ', 1) if whitelist and name not in whitelist or \ blacklist and name in blacklist: log.warning( 'The renderer ""%s"" is disallowed by configuration and ' 'will be skipped.', name ) continue results.append((renderers[name], argline.strip())) return results except KeyError: log.error('The renderer ""%s"" is not available', pipestr) return [] " 6512,"def get_wishlist_items(): if frappe.db.exists(""Wishlist"", frappe.session.user): return frappe.db.sql("""""" Select item_code, item_name, website_item, price, warehouse, image, item_group, route, formatted_price from `tabWishlist Items` where parent=%(user)s"""""" % {""user"": frappe.db.escape(frappe.session.user)}, as_dict=1) return","def get_wishlist_items(): if frappe.db.exists(""Wishlist"", frappe.session.user): return frappe.db.sql("""""" Select item_code, item_name, website_item, price, warehouse, image, item_group, route, formatted_price from `tabWishlist Items` where parent=%(user)s"""""", {""user"": frappe.session.user}, as_dict=1) return" 1013,"def test_print_method_weird(): class TextMagicHat(object): def __getattr__(self, key): return key f = HTMLFormatter() text_hat = TextMagicHat() assert text_hat._repr_html_ == ""_repr_html_"" with capture_output() as captured: result = f(text_hat) nt.assert_is(result, None) nt.assert_not_in(""FormatterWarning"", captured.stderr) class CallableMagicHat(object): def __getattr__(self, key): return lambda : key call_hat = CallableMagicHat() with capture_output() as captured: result = f(call_hat) assert result == None class BadReprArgs(object): def _repr_html_(self, extra, args): return ""html"" bad = BadReprArgs() with capture_output() as captured: result = f(bad) nt.assert_is(result, None) nt.assert_not_in(""FormatterWarning"", captured.stderr) ","def test_print_method_weird(): class TextMagicHat(object): def __getattr__(self, key): return key f = HTMLFormatter() text_hat = TextMagicHat() assert text_hat._repr_html_ == ""_repr_html_"" with capture_output() as captured: result = f(text_hat) nt.assert_is(result, None) nt.assert_not_in(""FormatterWarning"", captured.stderr) class CallableMagicHat(object): def __getattr__(self, key): return lambda : key call_hat = CallableMagicHat() with capture_output() as captured: result = f(call_hat) assert result is None class BadReprArgs(object): def _repr_html_(self, extra, args): return ""html"" bad = BadReprArgs() with capture_output() as captured: result = f(bad) nt.assert_is(result, None) nt.assert_not_in(""FormatterWarning"", captured.stderr) " 35230,"def random(m, n, density=0.01, format='coo', dtype=None, random_state=None, data_rvs=None): """"""Generates a random sparse matrix. This function generates a random sparse matrix. First it selects non-zero elements with given density ``density`` from ``(m, n)`` elements. So the number of non-zero elements ``k`` is ``k = m * n * density``. 
Value of each element is selected with ``data_rvs`` function. Args: m (int): Number of rows. n (int): Number of cols. density (float): Ratio of non-zero entries. format (str): Matrix format. dtype (~cupy.dtype): Type of the returned matrix values. random_state (cupy.random.RandomState or int): State of random number generator. If an integer is given, the method makes a new state for random number generator and uses it. If it is not given, the default state is used. This state is used to generate random indexes for nonzero entries. data_rvs (callable): A function to generate data for a random matrix. If it is not given, `random_state.rand` is used. Returns: cupyx.scipy.sparse.spmatrix: Generated matrix. .. seealso:: :func:`scipy.sparse.random` """""" if density < 0 or density > 1: raise ValueError('density expected to be 0 <= density <= 1') dtype = cupy.dtype(dtype) if dtype.char not in 'fd': raise NotImplementedError('type %s not supported' % dtype) mn = m * n k = int(density * m * n) if random_state is None: random_state = cupy.random elif isinstance(random_state, (int, cupy.integer)): random_state = cupy.random.RandomState(random_state) if data_rvs is None: data_rvs = random_state.rand ind = random_state.choice(mn, size=k, replace=False) j = ind//m i = ind - j * m vals = data_rvs(k).astype(dtype) return coo.coo_matrix( (vals, (i, j)), shape=(m, n)).asformat(format) ","def random(m, n, density=0.01, format='coo', dtype=None, random_state=None, data_rvs=None): """"""Generates a random sparse matrix. This function generates a random sparse matrix. First it selects non-zero elements with given density ``density`` from ``(m, n)`` elements. So the number of non-zero elements ``k`` is ``k = m * n * density``. Value of each element is selected with ``data_rvs`` function. Args: m (int): Number of rows. n (int): Number of cols. density (float): Ratio of non-zero entries. format (str): Matrix format. dtype (~cupy.dtype): Type of the returned matrix values. random_state (cupy.random.RandomState or int): State of random number generator. If an integer is given, the method makes a new state for random number generator and uses it. If it is not given, the default state is used. This state is used to generate random indexes for nonzero entries. data_rvs (callable): A function to generate data for a random matrix. If it is not given, `random_state.rand` is used. Returns: cupyx.scipy.sparse.spmatrix: Generated matrix. .. seealso:: :func:`scipy.sparse.random` """""" if density < 0 or density > 1: raise ValueError('density expected to be 0 <= density <= 1') dtype = cupy.dtype(dtype) if dtype.char not in 'fd': raise NotImplementedError('type %s not supported' % dtype) mn = m * n k = int(density * m * n) if random_state is None: random_state = cupy.random elif isinstance(random_state, (int, cupy.integer)): random_state = cupy.random.RandomState(random_state) if data_rvs is None: data_rvs = random_state.rand ind = random_state.choice(mn, size=k, replace=False) j = ind // m i = ind - j * m vals = data_rvs(k).astype(dtype) return coo.coo_matrix( (vals, (i, j)), shape=(m, n)).asformat(format) " 853,"def _permanent(M): """"""Returns the permanent of a matrix as defined in [1] Examples ======== >>> from sympy import Matrix >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> M.permanent() 450 References ========== 1. Prof. 
Frank Ben's notes: https://math.berkeley.edu/~bernd/ban275.pdf """""" from itertools import permutations if not M.is_square: raise NonSquareMatrixError() n = M.rows perm_list = list(permutations(range(n))) total = 0 # Computing permanent by summing over all permuatations on (0, ... n-1) # TODO: find a faster way to do this, maybe a sparse method, maybe Gaussian Elim for perm in perm_list: product = 1 for i in range(n): product = product*M[i, perm[i]] total += product return total ","def _permanent(M): """"""Returns the permanent of a matrix as defined in [1] Examples ======== >>> from sympy import Matrix >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> M.permanent() 450 References ========== .. [1] Prof. Frank Ben's notes: https://math.berkeley.edu/~bernd/ban275.pdf """""" from itertools import permutations if not M.is_square: raise NonSquareMatrixError() n = M.rows perm_list = list(permutations(range(n))) total = 0 # Computing permanent by summing over all permuatations on (0, ... n-1) # TODO: find a faster way to do this, maybe a sparse method, maybe Gaussian Elim for perm in perm_list: product = 1 for i in range(n): product = product*M[i, perm[i]] total += product return total " 50337,"def apply_version_filters(search, urlkwargs): """"""Apply record version filters to search."""""" if request and 'all_versions' in request.values: if((str(request.values['all_versions']) == ""1"") or str(request.values['all_versions'].lower()) == ""true""): urlkwargs.add('all_versions', ""true"") else: urlkwargs.add('all_versions', str(request.values['all_versions'])) else: search = search.filter( Q('term', **{'relations.version.is_last': True})) return (search, urlkwargs) ","def apply_version_filters(search, urlkwargs): """"""Apply record version filters to search."""""" if request and 'all_versions' in request.values: all_versions = request.values['all_versions'] if all_versions is None or str(all_versions).lower() in (""1"", ""true""): urlkwargs.add('all_versions', ""true"") else: urlkwargs.add('all_versions', str(request.values['all_versions'])) else: search = search.filter( Q('term', **{'relations.version.is_last': True})) return (search, urlkwargs) " 23636,"def boland(ghi, zenith, datetime_or_doy, min_cos_zenith=0.065, max_zenith=87): r"""""" Estimate DNI and DHI from GHI using the Boland clearness index model. The Boland model [1]_, [2]_ estimates the diffuse fraction, DF, from global horizontal irradiance, GHI, through an empirical relationship between DF and the ratio of GHI to extraterrestrial irradiance or clearness index, kt. .. math:: \mathit{DF} = \frac{1}{1 + \exp\left(-5 + 8.6 k_t\right)} where :math:`k_t` is the clearness index. Parameters ---------- ghi: numeric Global horizontal irradiance in W/m^2. zenith: numeric True (not refraction-corrected) zenith angles in decimal degrees. datetime_or_doy : int, float, array, pd.DatetimeIndex Day of year or array of days of year e.g. pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex. min_cos_zenith : numeric, default 0.065 Minimum value of cos(zenith) to allow when calculating global clearness index `kt`. Equivalent to zenith = 86.273 degrees. max_zenith : numeric, default 87 Maximum value of zenith to allow in DNI calculation. DNI will be set to 0 for times with zenith values greater than `max_zenith`. Returns ------- data : OrderedDict or DataFrame Contains the following keys/columns: * ``dni``: the modeled direct normal irradiance in W/m^2. * ``dhi``: the modeled diffuse horizontal irradiance in W/m^2. 
* ``kt``: Ratio of global to extraterrestrial irradiance on a horizontal plane. References ---------- .. [1] John Boland, Lynne Scott, and Mark Luther, Modelling the diffuse fraction of global solar radiation on a horizontal surface, Environmetrics 12(2), pp 103-116, 2001, :doi:`10.1002/1099-095X(200103)12:2%3C103::AID-ENV447%3E3.0.CO;2-2` .. [2] J. Boland, B. Ridley (2008) Models of Diffuse Solar Fraction. In: Badescu V. (eds) Modeling Solar Radiation at the Earth’s Surface. Springer, Berlin, Heidelberg. :doi:`10.1007/978-3-540-77455-6_8` See also -------- dirint disc erbs """""" dni_extra = get_extra_radiation(datetime_or_doy) kt = clearness_index(ghi, zenith, dni_extra, min_cos_zenith=min_cos_zenith, max_clearness_index=1) # Boland equation df = 1.0 / (1.0 + np.exp(-5.0 + 8.6 * kt)) # NOTE: [1] has different coefficients, for different time intervals # 15-min: df = 1 / (1 + exp(8.645 * (kt - 0.613))) # 1-hour: df = 1 / (1 + exp(7.997 * (kt - 0.586))) dhi = df * ghi dni = (ghi - dhi) / tools.cosd(zenith) bad_values = (zenith > max_zenith) | (ghi < 0) | (dni < 0) dni = np.where(bad_values, 0, dni) # ensure that closure relationship remains valid dhi = np.where(bad_values, ghi, dhi) data = OrderedDict() data['dni'] = dni data['dhi'] = dhi data['kt'] = kt if isinstance(datetime_or_doy, pd.DatetimeIndex): data = pd.DataFrame(data, index=datetime_or_doy) return data ","def boland(ghi, zenith, datetime_or_doy, min_cos_zenith=0.065, max_zenith=87): r"""""" Estimate DNI and DHI from GHI using the Boland clearness index model. The Boland model [1]_, [2]_ estimates the diffuse fraction, DF, from global horizontal irradiance, GHI, through an empirical relationship between DF and the ratio of GHI to extraterrestrial irradiance or clearness index, kt. .. math:: \mathit{DF} = \frac{1}{1 + \exp\left(-5 + 8.6 k_t\right)} where :math:`k_t` is the clearness index. Parameters ---------- ghi: numeric Global horizontal irradiance. [W/m^2] zenith: numeric True (not refraction-corrected) zenith angles in decimal degrees. datetime_or_doy : int, float, array, pd.DatetimeIndex Day of year or array of days of year e.g. pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex. min_cos_zenith : numeric, default 0.065 Minimum value of cos(zenith) to allow when calculating global clearness index `kt`. Equivalent to zenith = 86.273 degrees. max_zenith : numeric, default 87 Maximum value of zenith to allow in DNI calculation. DNI will be set to 0 for times with zenith values greater than `max_zenith`. Returns ------- data : OrderedDict or DataFrame Contains the following keys/columns: * ``dni``: the modeled direct normal irradiance in W/m^2. * ``dhi``: the modeled diffuse horizontal irradiance in W/m^2. * ``kt``: Ratio of global to extraterrestrial irradiance on a horizontal plane. References ---------- .. [1] John Boland, Lynne Scott, and Mark Luther, Modelling the diffuse fraction of global solar radiation on a horizontal surface, Environmetrics 12(2), pp 103-116, 2001, :doi:`10.1002/1099-095X(200103)12:2%3C103::AID-ENV447%3E3.0.CO;2-2` .. [2] J. Boland, B. Ridley (2008) Models of Diffuse Solar Fraction. In: Badescu V. (eds) Modeling Solar Radiation at the Earth’s Surface. Springer, Berlin, Heidelberg. 
:doi:`10.1007/978-3-540-77455-6_8` See also -------- dirint disc erbs """""" dni_extra = get_extra_radiation(datetime_or_doy) kt = clearness_index(ghi, zenith, dni_extra, min_cos_zenith=min_cos_zenith, max_clearness_index=1) # Boland equation df = 1.0 / (1.0 + np.exp(-5.0 + 8.6 * kt)) # NOTE: [1] has different coefficients, for different time intervals # 15-min: df = 1 / (1 + exp(8.645 * (kt - 0.613))) # 1-hour: df = 1 / (1 + exp(7.997 * (kt - 0.586))) dhi = df * ghi dni = (ghi - dhi) / tools.cosd(zenith) bad_values = (zenith > max_zenith) | (ghi < 0) | (dni < 0) dni = np.where(bad_values, 0, dni) # ensure that closure relationship remains valid dhi = np.where(bad_values, ghi, dhi) data = OrderedDict() data['dni'] = dni data['dhi'] = dhi data['kt'] = kt if isinstance(datetime_or_doy, pd.DatetimeIndex): data = pd.DataFrame(data, index=datetime_or_doy) return data " 45994,"def bihome_loss( patch_1: torch.Tensor, patch_2: torch.Tensor, delta_hat_12: torch.Tensor, delta_hat_21: torch.Tensor, triplet_mu: float, loss_network: nn.Module, ) -> torch.Tensor: r""""""biHomE loss implementation. Based on: :cite:`koguciuk2021perceptual` and https://github.com/NeurAI-Lab/biHomE. Args: patch_1: image tensor with shape :math:`(B, C, H, W)` where B = batch size, C = number of classes patch_2: image tensor with shape :math:`(B, C, H, W)` where B = batch size, C = number of classes delta_hat_12: predicted corner differences from image 1 to image 2 with shape :math:`(B, 4, 2)`, where B = batch size. delta_hat_21: predicted corner differences from image 2 to image 1 with shape :math:`(B, 4, 2)`, where B = batch size. triplet_mu: Homography matrix regularization weight. loss_network: loss network used. Return: the computed loss. """""" if not isinstance(patch_1, torch.Tensor): raise TypeError(f""patch_1 type is not a torch.Tensor. Got {type(patch_1)}"") if not len(patch_1.shape) == 4: raise ValueError(f""Invalid input shape of patch_1, we expect BxCxHxW. Got: {patch_1.shape}"") if not isinstance(patch_2, torch.Tensor): raise TypeError(f""patch_2 type is not a torch.Tensor. Got {type(patch_2)}"") if not len(patch_2.shape) == 4: raise ValueError(f""Invalid input shape of patch_2, we expect BxCxHxW. Got: {patch_2.shape}"") if patch_1.shape != patch_2.shape: raise ValueError(f'Expected patch_1 shape ({patch_1.shape}) to match patch_2 shape ({patch_2.shape}).') if not isinstance(delta_hat_12, torch.Tensor): raise TypeError(f""delta_hat_12 type is not a torch.Tensor. Got {type(delta_hat_12)}"") if not len(delta_hat_12.shape) == 3 or not delta_hat_12.shape[1] == 4 or not delta_hat_12.shape[2] == 2: raise ValueError(f""Invalid input shape of delta_hat_12, we expect Bx4x2. Got: {delta_hat_12.shape}"") if not delta_hat_12.size(0) == patch_1.size(0): raise ValueError(f'Expected delta_hat_12 batch_size ({delta_hat_12.size(0)}) to match patch_1 batch size ' f'({patch_1.size(0)}).') if not isinstance(delta_hat_21, torch.Tensor): raise TypeError(f""delta_hat_21 type is not a torch.Tensor. Got {type(delta_hat_21)}"") if not len(delta_hat_21.shape) == 3 or not delta_hat_21.shape[1] == 4 or not delta_hat_21.shape[2] == 2: raise ValueError(f""Invalid input shape of delta_hat_21, we expect Bx4x2. Got: {delta_hat_21.shape}"") if not delta_hat_21.size(0) == patch_1.size(0): raise ValueError(f'Expected delta_hat_21 batch_size ({delta_hat_21.size(0)}) to match patch_1 batch size ' f'({patch_1.size(0)}).') if not isinstance(loss_network, nn.Module): raise TypeError(f""loss_network type is not a str. 
Got {type(loss_network)}"") # Compute features of both patches patch_1_f = loss_network(patch_1) patch_2_f = loss_network(patch_2) # Warp patch 1 with delta hat_12 patch_1_prime, h1 = _warp(patch_1, delta_hat=delta_hat_12) patch_1_prime_f = loss_network(patch_1_prime) # Warp patch 2 with delta hat_21 patch_2_prime, h2 = _warp(patch_2, delta_hat=delta_hat_21) patch_2_prime_f = loss_network(patch_2_prime) # Create and warp masks patch_1_m = torch.ones_like(patch_1) patch_2_m = torch.ones_like(patch_2) patch_1_m_prime, _ = _warp(patch_1_m, delta_hat=delta_hat_12) patch_2_m_prime, _ = _warp(patch_2_m, delta_hat=delta_hat_21) # Mask size mismatch downsampling _, _, f_h, _ = patch_1_prime_f.shape downsample_factor = patch_1_m.shape[-1] // f_h downsample_layer = torch.nn.AvgPool2d(kernel_size=downsample_factor, stride=downsample_factor, padding=0) patch_1_m = torch.squeeze(downsample_layer(patch_1_m), dim=1) patch_2_m = torch.squeeze(downsample_layer(patch_2_m), dim=1) patch_1_m_prime = torch.squeeze(downsample_layer(patch_1_m_prime), dim=1) patch_2_m_prime = torch.squeeze(downsample_layer(patch_2_m_prime), dim=1) # Triplet Margin Loss l1 = torch.sum(torch.abs(patch_1_prime_f - patch_2_f), dim=1) l2 = torch.sum(torch.abs(patch_1_f - patch_2_prime_f), dim=1) l3 = torch.sum(torch.abs(patch_1_f - patch_2_f), dim=1) ln1_nom = torch.sum(torch.sum(patch_1_m_prime * patch_2_m * (l1 - l3), dim=-1), dim=-1) ln1_den = torch.sum(torch.sum(patch_1_m_prime * patch_2_m, dim=-1), dim=-1) ln1_den = torch.max(ln1_den, torch.ones_like(ln1_den)) ln2_nom = torch.sum(torch.sum(patch_1_m * patch_2_m_prime * (l2 - l3), dim=-1), dim=-1) ln2_den = torch.sum(torch.sum(patch_1_m * patch_2_m_prime, dim=-1), dim=-1) ln2_den = torch.max(ln2_den, torch.ones_like(ln2_den)) ln1 = torch.sum(ln1_nom / ln1_den) ln2 = torch.sum(ln2_nom / ln2_den) # Regularization batch_size = patch_1.size(0) eye = torch.eye(3, dtype=h1.dtype, device=h1.device).unsqueeze(dim=0).repeat(batch_size, 1, 1) ln3 = torch.sum((torch.matmul(h1, h2) - eye) ** 2) * triplet_mu loss = ln1 + ln2 + ln3 return loss ","def bihome_loss( patch_1: torch.Tensor, patch_2: torch.Tensor, delta_hat_12: torch.Tensor, delta_hat_21: torch.Tensor, triplet_mu: float, loss_network: nn.Module, ) -> torch.Tensor: r""""""biHomE loss implementation. Based on: :cite:`koguciuk2021perceptual` and https://github.com/NeurAI-Lab/biHomE. Args: patch_1: image tensor with shape :math:`(B, C, H, W)` where B = batch size, C = number of classes patch_2: image tensor with shape :math:`(B, C, H, W)` where B = batch size, C = number of classes delta_hat_12: predicted corner differences from image 1 to image 2 with shape :math:`(B, 4, 2)`, where B = batch size. delta_hat_21: predicted corner differences from image 2 to image 1 with shape :math:`(B, 4, 2)`, where B = batch size. triplet_mu: Homography matrix regularization weight. loss_network: loss network used. Return: the computed loss. """""" if not isinstance(patch_1, torch.Tensor): raise TypeError(f""patch_1 type is not a torch.Tensor. Got {type(patch_1)}"") if not len(patch_1.shape) == 4: raise ValueError(f""Invalid input shape of patch_1, we expect BxCxHxW. Got: {patch_1.shape}"") if not isinstance(patch_2, torch.Tensor): raise TypeError(f""patch_2 type is not a torch.Tensor. Got {type(patch_2)}"") if not len(patch_2.shape) == 4: raise ValueError(f""Invalid input shape of patch_2, we expect BxCxHxW. 
Got: {patch_2.shape}"") if patch_1.shape != patch_2.shape: raise ValueError(f'Expected patch_1 shape ({patch_1.shape}) to match patch_2 shape ({patch_2.shape}).') if not isinstance(delta_hat_12, torch.Tensor): raise TypeError(f""delta_hat_12 type is not a torch.Tensor. Got {type(delta_hat_12)}"") if not len(delta_hat_12.shape) == 3 or not delta_hat_12.shape[1] == 4 or not delta_hat_12.shape[2] == 2: raise ValueError(f""Invalid input shape of delta_hat_12, we expect Bx4x2. Got: {delta_hat_12.shape}"") if not delta_hat_12.size(0) == patch_1.size(0): raise ValueError(f'Expected delta_hat_12 batch_size ({delta_hat_12.size(0)}) to match patch_1 batch size ' f'({patch_1.size(0)}).') if not isinstance(delta_hat_21, torch.Tensor): raise TypeError(f""delta_hat_21 type is not a torch.Tensor. Got {type(delta_hat_21)}"") if not len(delta_hat_21.shape) == 3 or not delta_hat_21.shape[1] == 4 or not delta_hat_21.shape[2] == 2: raise ValueError(f""Invalid input shape of delta_hat_21, we expect Bx4x2. Got: {delta_hat_21.shape}"") if not delta_hat_21.size(0) == patch_1.size(0): raise ValueError(f'Expected delta_hat_21 batch_size ({delta_hat_21.size(0)}) to match patch_1 batch size ' f'({patch_1.size(0)}).') if not isinstance(loss_network, nn.Module): raise TypeError(f""loss_network type is not a str. Got {type(loss_network)}"") # Compute features of both patches patch_1_f = loss_network(patch_1) patch_2_f = loss_network(patch_2) # Warp patch 1 with delta hat_12 patch_1_prime, h1 = _warp(patch_1, delta_hat=delta_hat_12) patch_1_prime_f = loss_network(patch_1_prime) # Warp patch 2 with delta hat_21 patch_2_prime, h2 = _warp(patch_2, delta_hat=delta_hat_21) patch_2_prime_f = loss_network(patch_2_prime) # Create and warp masks patch_1_m = torch.ones_like(patch_1) patch_2_m = torch.ones_like(patch_2) patch_1_m_prime, _ = _warp(patch_1_m, delta_hat=delta_hat_12) patch_2_m_prime, _ = _warp(patch_2_m, delta_hat=delta_hat_21) # Mask size mismatch downsampling _, _, f_h, _ = patch_1_prime_f.shape downsample_factor = patch_1_m.shape[-1] // f_h downsample_layer = torch.nn.AvgPool2d(kernel_size=downsample_factor, stride=downsample_factor, padding=0) patch_1_m = torch.squeeze(downsample_layer(patch_1_m), dim=1) patch_2_m = torch.squeeze(downsample_layer(patch_2_m), dim=1) patch_1_m_prime = torch.squeeze(downsample_layer(patch_1_m_prime), dim=1) patch_2_m_prime = torch.squeeze(downsample_layer(patch_2_m_prime), dim=1) # Triplet Margin Loss l1 = torch.sum(torch.abs(patch_1_prime_f - patch_2_f), dim=1) l2 = torch.sum(torch.abs(patch_1_f - patch_2_prime_f), dim=1) l3 = torch.sum(torch.abs(patch_1_f - patch_2_f), dim=1) ln1_nom = torch.sum(torch.sum(patch_1_m_prime * patch_2_m * (l1 - l3), dim=-1), dim=-1) ln1_den = torch.sum(torch.sum(patch_1_m_prime * patch_2_m, dim=-1), dim=-1) ln1_den = torch.max(ln1_den, torch.ones_like(ln1_den)) ln2_nom = torch.sum(torch.sum(patch_1_m * patch_2_m_prime * (l2 - l3), dim=-1), dim=-1) ln2_den = torch.sum(torch.sum(patch_1_m * patch_2_m_prime, dim=-1), dim=-1) ln2_den = torch.max(ln2_den, torch.ones_like(ln2_den)) ln1 = torch.sum(ln1_nom / ln1_den) ln2 = torch.sum(ln2_nom / ln2_den) # Regularization batch_size = patch_1.size(0) eye = kornia.eye_like(3, h1) ln3 = torch.sum((torch.matmul(h1, h2) - eye) ** 2) * triplet_mu loss = ln1 + ln2 + ln3 return loss " 24866,"def get_midpoint(bottomleft: Point, topright: Point) -> Point: """"""Example of a function with missing Google style parameter documentation in the docstring. 
Args: bottomleft: bottom left point of rectangle topright: top right point of rectangle """""" pass ","def test_non_builtin_annotations_for_returntype_in_google_docstring(bottomleft: Point, topright: Point) -> Point: """"""Example of a function with missing Google style parameter documentation in the docstring. Args: bottomleft: bottom left point of rectangle topright: top right point of rectangle """""" pass " 31379,"def check_if_index_is_updated(index_folder_path: str, content_repo: Any, current_commit_hash: str, previous_commit_hash: str, storage_bucket: Any, is_private_content_updated: bool = False): """""" Checks stored at index.json commit hash and compares it to current commit hash. In case no packs folders were added/modified/deleted, all other steps are not performed. Args: index_folder_path (str): index folder full path. content_repo (git.repo.base.Repo): content repo object. current_commit_hash (str): last commit hash of head. previous_commit_hash (str): the previous commit to diff with storage_bucket: public storage bucket. is_private_content_updated (bool): True if private content updated, False otherwise. """""" skipping_build_task_message = ""Skipping Upload Packs To Marketplace Storage Step."" try: if storage_bucket.name not in (GCPConfig.CI_BUILD_BUCKET, GCPConfig.PRODUCTION_BUCKET): logging.info(""Skipping index update check in non production/build bucket"") return if is_private_content_updated: logging.debug(""Skipping index update ad Private Content has updated."") return if not os.path.exists(os.path.join(index_folder_path, f""{GCPConfig.INDEX_NAME}.json"")): # will happen only in init bucket run logging.warning(f""{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder"") return with open(os.path.join(index_folder_path, f""{GCPConfig.INDEX_NAME}.json"")) as index_file: index_json = json.load(index_file) index_commit_hash = index_json.get('commit', previous_commit_hash) try: index_commit = content_repo.commit(index_commit_hash) except Exception: # not updated build will receive this exception because it is missing more updated commit logging.exception(f""Index is already updated. {skipping_build_task_message}"") sys.exit() current_commit = content_repo.commit(current_commit_hash) if current_commit.committed_datetime <= index_commit.committed_datetime: logging.warning( f""Current commit {current_commit.hexsha} committed time: {current_commit.committed_datetime}"") logging.warning(f""Index commit {index_commit.hexsha} committed time: {index_commit.committed_datetime}"") logging.warning(""Index is already updated."") logging.warning(skipping_build_task_message) sys.exit() for changed_file in current_commit.diff(index_commit): if changed_file.a_path.startswith(PACKS_FOLDER): logging.info( f""Found changed packs between index commit {index_commit.hexsha} and {current_commit.hexsha}"") break else: logging.warning(f""No changes found between index commit {index_commit.hexsha} and {current_commit.hexsha}"") logging.warning(skipping_build_task_message) sys.exit() except Exception: logging.exception(""Failed in checking status of index"") sys.exit(1) ","def check_if_index_is_updated(index_folder_path: str, content_repo: Any, current_commit_hash: str, previous_commit_hash: str, storage_bucket: Any, is_private_content_updated: bool = False): """""" Checks stored at index.json commit hash and compares it to current commit hash. In case no packs folders were added/modified/deleted, all other steps are not performed. 
Args: index_folder_path (str): index folder full path. content_repo (git.repo.base.Repo): content repo object. current_commit_hash (str): last commit hash of head. previous_commit_hash (str): the previous commit to diff with storage_bucket: public storage bucket. is_private_content_updated (bool): True if private content updated, False otherwise. """""" skipping_build_task_message = ""Skipping Upload Packs To Marketplace Storage Step."" try: if storage_bucket.name not in (GCPConfig.CI_BUILD_BUCKET, GCPConfig.PRODUCTION_BUCKET): logging.info(""Skipping index update check in non production/build bucket"") return if is_private_content_updated: logging.debug(""Skipping index update as Private Content has updated."") return if not os.path.exists(os.path.join(index_folder_path, f""{GCPConfig.INDEX_NAME}.json"")): # will happen only in init bucket run logging.warning(f""{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder"") return with open(os.path.join(index_folder_path, f""{GCPConfig.INDEX_NAME}.json"")) as index_file: index_json = json.load(index_file) index_commit_hash = index_json.get('commit', previous_commit_hash) try: index_commit = content_repo.commit(index_commit_hash) except Exception: # not updated build will receive this exception because it is missing more updated commit logging.exception(f""Index is already updated. {skipping_build_task_message}"") sys.exit() current_commit = content_repo.commit(current_commit_hash) if current_commit.committed_datetime <= index_commit.committed_datetime: logging.warning( f""Current commit {current_commit.hexsha} committed time: {current_commit.committed_datetime}"") logging.warning(f""Index commit {index_commit.hexsha} committed time: {index_commit.committed_datetime}"") logging.warning(""Index is already updated."") logging.warning(skipping_build_task_message) sys.exit() for changed_file in current_commit.diff(index_commit): if changed_file.a_path.startswith(PACKS_FOLDER): logging.info( f""Found changed packs between index commit {index_commit.hexsha} and {current_commit.hexsha}"") break else: logging.warning(f""No changes found between index commit {index_commit.hexsha} and {current_commit.hexsha}"") logging.warning(skipping_build_task_message) sys.exit() except Exception: logging.exception(""Failed in checking status of index"") sys.exit(1) " 31499,"def add_or_remove_urls_from_category(action, urls, category_data): """""" Add or remove urls from a category. Args: action: The action requested, can be ADD_TO_LIST for adding or REMOVE_FROM_LIST for removing. urls: the list of urls to add or remove from the category category_data: the data of the category as returned from the API Returns: The response as returned from the API """""" cmd_url = '/urlCategories/' + category_data.get('id') + '?action=' + action data = { 'customCategory': category_data.get('customCategory'), 'urls': urls, 'id': category_data.get('id') } if 'description' in category_data: data['description'] = category_data['description'] if 'configuredName' in category_data: data['configuredName'] = category_data['configuredName'] json_data = json.dumps(data) response = http_request('PUT', cmd_url, json_data) return response.json() ","def add_or_remove_urls_from_category(action, urls, category_data): """""" Add or remove urls from a category. Args: action: The action requested, can be 'ADD_TO_LIST' for adding or 'REMOVE_FROM'_LIST for removing. 
urls: the list of urls to add or remove from the category category_data: the data of the category as returned from the API Returns: The response as returned from the API """""" cmd_url = '/urlCategories/' + category_data.get('id') + '?action=' + action data = { 'customCategory': category_data.get('customCategory'), 'urls': urls, 'id': category_data.get('id') } if 'description' in category_data: data['description'] = category_data['description'] if 'configuredName' in category_data: data['configuredName'] = category_data['configuredName'] json_data = json.dumps(data) response = http_request('PUT', cmd_url, json_data) return response.json() " 32118,"def should_test_content_pack(pack_name: str, marketplace_version: str, id_set: dict) -> Tuple[bool, str]: """"""Checks if content pack should be tested in the build: - Content pack is not in skipped packs - Content pack is certified - Content pack is not deprecated - Content pack is not supported the marketplace_version Args: pack_name (str): The pack name to check if it should be tested marketplace_version (str): id_set (dict): Structure which holds all content entities to extract pack names from. Returns: bool: True if should be tested, False otherwise """""" if not pack_name: return False, 'Invalid pack name' pack_path = os.path.join(PACKS_DIR, pack_name) if pack_name in SKIPPED_PACKS: return False, 'Pack is either the ""NonSupported"" pack or the ""DeprecatedContent"" pack.' if not is_pack_xsoar_supported(pack_path): return False, 'Pack is not XSOAR supported' if is_pack_deprecated(pack_path): return False, 'Pack is Deprecated' if marketplace_version not in get_pack_supported_marketplace_version(pack_name, id_set): return False, 'Pack is not supported in this marketplace version' return True, '' ","def should_test_content_pack(pack_name: str, marketplace_version: str, id_set: dict) -> Tuple[bool, str]: """"""Checks if content pack should be tested in the build: - Content pack is not in skipped packs - Content pack is certified - Content pack is not deprecated - Content pack is not supported the marketplace_version Args: pack_name (str): The pack name to check if it should be tested marketplace_version (str): the marketplace version to collect tests for ('xsoar'/'marketplacev2') id_set (dict): Structure which holds all content entities to extract pack names from. Returns: bool: True if should be tested, False otherwise """""" if not pack_name: return False, 'Invalid pack name' pack_path = os.path.join(PACKS_DIR, pack_name) if pack_name in SKIPPED_PACKS: return False, 'Pack is either the ""NonSupported"" pack or the ""DeprecatedContent"" pack.' 
if not is_pack_xsoar_supported(pack_path): return False, 'Pack is not XSOAR supported' if is_pack_deprecated(pack_path): return False, 'Pack is Deprecated' if marketplace_version not in get_pack_supported_marketplace_version(pack_name, id_set): return False, 'Pack is not supported in this marketplace version' return True, '' " 8202,"def test_contour_units(simple_map): # Check that contouring with units works as intended simple_map.meta['bunit'] = 'm' # Same units contours = simple_map.contour(1.5 * u.m) assert len(contours) == 1 # Different units, but convertible contours_cm = simple_map.contour(150 * u.cm) for c1, c2 in zip(contours, contours_cm): np.all(c1 == c2) # Percentage contours_percent = simple_map.contour(100 * u.percent) contours_ref = simple_map.contour(np.max(simple_map.data) * simple_map.unit) for c1, c2 in zip(contours_percent, contours_ref): np.all(c1 == c2) with pytest.raises(TypeError, match='The levels argument has no unit attribute'): simple_map.contour(1.5) with pytest.raises(u.UnitsError, match=re.escape(""'s' (time) and 'm' (length) are not convertible"")): simple_map.contour(1.5 * u.s) ","def test_contour_units(simple_map): # Check that contouring with units works as intended simple_map.meta['bunit'] = 'm' # Same units contours = simple_map.contour(1.5 * u.m) assert len(contours) == 1 # Different units, but convertible contours_cm = simple_map.contour(150 * u.cm) for c1, c2 in zip(contours, contours_cm): np.all(c1 == c2) # Percentage contours_percent = simple_map.contour(100 * u.percent) contours_ref = simple_map.contour(np.max(simple_map.data) * simple_map.unit) for c1, c2 in zip(contours_percent, contours_ref): assert np.all(c1 == c2) with pytest.raises(TypeError, match='The levels argument has no unit attribute'): simple_map.contour(1.5) with pytest.raises(u.UnitsError, match=re.escape(""'s' (time) and 'm' (length) are not convertible"")): simple_map.contour(1.5 * u.s) " 14514,"def match_data(g_pool, pupil_list, ref_list): if pupil_list and ref_list: pass else: logger.error(not_enough_data_error_msg) return { ""subject"": ""calibration.failed"", ""reason"": not_enough_data_error_msg, ""timestamp"": g_pool.get_timestamp(), ""record"": True, } # match eye data and check if biocular and or monocular pupil0 = [p for p in pupil_list if p[""id""] == 0] pupil1 = [p for p in pupil_list if p[""id""] == 1] # TODO unify this and don't do both matched_binocular_data = closest_matches_binocular(ref_list, pupil_list) matched_pupil0_data = closest_matches_monocular(ref_list, pupil0) matched_pupil1_data = closest_matches_monocular(ref_list, pupil1) if len(matched_pupil0_data) > len(matched_pupil1_data): matched_monocular_data = matched_pupil0_data else: matched_monocular_data = matched_pupil1_data logger.info( ""Collected {} monocular calibration data."".format(len(matched_monocular_data)) ) logger.info( ""Collected {} binocular calibration data."".format(len(matched_binocular_data)) ) return ( matched_binocular_data, matched_monocular_data, matched_pupil0_data, matched_pupil1_data, pupil0, pupil1, ) ","def match_data(g_pool, pupil_list, ref_list): if pupil_list and ref_list: pass else: logger.error(not_enough_data_error_msg) return { ""subject"": ""calibration.failed"", ""reason"": not_enough_data_error_msg, ""timestamp"": g_pool.get_timestamp(), ""record"": True, } # match eye data and check if binocular and or monocular pupil0 = [p for p in pupil_list if p[""id""] == 0] pupil1 = [p for p in pupil_list if p[""id""] == 1] # TODO unify this and don't do both 
matched_binocular_data = closest_matches_binocular(ref_list, pupil_list) matched_pupil0_data = closest_matches_monocular(ref_list, pupil0) matched_pupil1_data = closest_matches_monocular(ref_list, pupil1) if len(matched_pupil0_data) > len(matched_pupil1_data): matched_monocular_data = matched_pupil0_data else: matched_monocular_data = matched_pupil1_data logger.info( ""Collected {} monocular calibration data."".format(len(matched_monocular_data)) ) logger.info( ""Collected {} binocular calibration data."".format(len(matched_binocular_data)) ) return ( matched_binocular_data, matched_monocular_data, matched_pupil0_data, matched_pupil1_data, pupil0, pupil1, ) " 31171,"def get_audit_agent_reports_command(client, args): endpoint_ids = argToList(args.get('endpoint_ids')) endpoint_names = argToList(args.get('endpoint_names')) result = argToList(args.get('result')) _type = argToList(args.get('type')) sub_type = argToList(args.get('sub_type')) timestamp_gte = arg_to_timestamp( arg=args.get('timestamp_gte'), arg_name='timestamp_gte' ) timestamp_lte = arg_to_timestamp( arg=args.get('timestamp_lte'), arg_name='timestamp_lte' ) page_number = arg_to_int( arg=args.get('page', 0), arg_name='Failed to parse ""page"". Must be a number.', required=True ) limit = arg_to_int( arg=args.get('limit', 20), arg_name='Failed to parse ""limit"". Must be a number.', required=True ) search_from = page_number * limit search_to = search_from + limit sort_by = args.get('sort_by') sort_order = args.get('sort_order', 'asc') audit_logs = client.get_audit_agent_reports( endpoint_ids=endpoint_ids, endpoint_names=endpoint_names, result=result, _type=_type, sub_type=sub_type, timestamp_gte=timestamp_gte, timestamp_lte=timestamp_lte, search_from=search_from, search_to=search_to, sort_by=sort_by, sort_order=sort_order ) endpoint_context = create_endpoint_context(audit_logs) return ( tableToMarkdown('Audit Agent Reports', audit_logs), { f'{INTEGRATION_CONTEXT_BRAND}.AuditAgentReports': audit_logs, Common.Endpoint.CONTEXT_PATH: endpoint_context }, audit_logs ) ","def get_audit_agent_reports_command(client, args): endpoint_ids = argToList(args.get('endpoint_ids')) endpoint_names = argToList(args.get('endpoint_names')) result = argToList(args.get('result')) _type = argToList(args.get('type')) sub_type = argToList(args.get('sub_type')) timestamp_gte = arg_to_timestamp( arg=args.get('timestamp_gte'), arg_name='timestamp_gte' ) timestamp_lte = arg_to_timestamp( arg=args.get('timestamp_lte'), arg_name='timestamp_lte' ) page_number = arg_to_int( arg=args.get('page', 0), arg_name='Failed to parse ""page"". Must be a number.', required=True ) limit = arg_to_int( arg=args.get('limit', 20), arg_name='Failed to parse ""limit"". 
Must be a number.', required=True ) search_from = page_number * limit search_to = search_from + limit sort_by = args.get('sort_by') sort_order = args.get('sort_order', 'asc') audit_logs = client.get_audit_agent_reports( endpoint_ids=endpoint_ids, endpoint_names=endpoint_names, result=result, _type=_type, sub_type=sub_type, timestamp_gte=timestamp_gte, timestamp_lte=timestamp_lte, search_from=search_from, search_to=search_to, sort_by=sort_by, sort_order=sort_order ) endpoint_context = create_endpoint_context(audit_logs) return ( tableToMarkdown('Audit Agent Reports', audit_logs), { f'{INTEGRATION_CONTEXT_BRAND}.AuditAgentReports': audit_logs, Common.Endpoint.CONTEXT_PATH: endpoint_context, }, audit_logs ) " 37656,"def _expand_parameters(circuits, run_config): """"""Verifies that there is a single common set of parameters shared between all circuits and all parameter binds in the run_config. Returns an expanded list of circuits (if parameterized) with all parameters bound, and a copy of the run_config with parameter_binds cleared. If neither the circuits nor the run_config specify parameters, the two are returned unmodified. Raises: QiskitError: if run_config parameters are not compatible with circuit parameters Returns: Tuple(List[QuantumCircuit], RunConfig): - List of input circuits expanded and with parameters bound - RunConfig with parameter_binds removed """""" parameter_binds = run_config.parameter_binds if ( parameter_binds and any(binds for binds in parameter_binds) or any(circuit.parameters for circuit in circuits) ): # Unroll params here in order to handle ParamVects all_bind_parameters = [ QuantumCircuit()._unroll_param_dict(bind).keys() for bind in parameter_binds ] all_circuit_parameters = [circuit.parameters for circuit in circuits] # Collect set of all unique parameters across all circuits and binds unique_parameters = { param for param_list in all_bind_parameters + all_circuit_parameters for param in param_list } # Check that all parameters are common to all circuits and binds if ( not all_bind_parameters or not all_circuit_parameters or any(unique_parameters != bind_params for bind_params in all_bind_parameters) or any(unique_parameters != parameters for parameters in all_circuit_parameters) ): raise QiskitError( ( ""Mismatch between run_config.parameter_binds and all circuit parameters. "" + ""Parameter binds: {} "" + ""Circuit parameters: {}"" ).format(all_bind_parameters, all_circuit_parameters) ) circuits = [ circuit.bind_parameters(binds) for circuit in circuits for binds in parameter_binds ] # All parameters have been expanded and bound, so remove from run_config run_config = copy.deepcopy(run_config) run_config.parameter_binds = [] return circuits, run_config ","def _expand_parameters(circuits, run_config): """"""Verifies that there is a single common set of parameters shared between all circuits and all parameter binds in the run_config. Returns an expanded list of circuits (if parameterized) with all parameters bound, and a copy of the run_config with parameter_binds cleared. If neither the circuits nor the run_config specify parameters, the two are returned unmodified. 
Raises: QiskitError: if run_config parameters are not compatible with circuit parameters Returns: Tuple(List[QuantumCircuit], RunConfig): - List of input circuits expanded and with parameters bound - RunConfig with parameter_binds removed """""" parameter_binds = run_config.parameter_binds if ( parameter_binds and any(parameter_binds) or any(circuit.parameters for circuit in circuits) ): # Unroll params here in order to handle ParamVects all_bind_parameters = [ QuantumCircuit()._unroll_param_dict(bind).keys() for bind in parameter_binds ] all_circuit_parameters = [circuit.parameters for circuit in circuits] # Collect set of all unique parameters across all circuits and binds unique_parameters = { param for param_list in all_bind_parameters + all_circuit_parameters for param in param_list } # Check that all parameters are common to all circuits and binds if ( not all_bind_parameters or not all_circuit_parameters or any(unique_parameters != bind_params for bind_params in all_bind_parameters) or any(unique_parameters != parameters for parameters in all_circuit_parameters) ): raise QiskitError( ( ""Mismatch between run_config.parameter_binds and all circuit parameters. "" + ""Parameter binds: {} "" + ""Circuit parameters: {}"" ).format(all_bind_parameters, all_circuit_parameters) ) circuits = [ circuit.bind_parameters(binds) for circuit in circuits for binds in parameter_binds ] # All parameters have been expanded and bound, so remove from run_config run_config = copy.deepcopy(run_config) run_config.parameter_binds = [] return circuits, run_config " 30103,"def test_search_containment_abund(runtmp): ""Construct some signatures with abund, make sure that containment complains"" # build minhashes mh1 = MinHash(0, 21, scaled=1, track_abundance=True) mh2 = MinHash(0, 21, scaled=1, track_abundance=True) mh1.add_many((1, 2, 3, 4)) mh1.add_many((1, 2)) mh2.add_many((1, 5)) mh2.add_many((1, 5)) mh2.add_many((1, 5)) # build signatures x = sourmash.SourmashSignature(mh1, name='a') y = sourmash.SourmashSignature(mh2, name='b') # save! with open(runtmp.output('a.sig'), 'wt') as fp: sourmash.save_signatures([x], fp) with open(runtmp.output('b.sig'), 'wt') as fp: sourmash.save_signatures([y], fp) # run sourmash search --containent with pytest.raises(SourmashCommandFailed) as exc: runtmp.sourmash('search', 'a.sig', 'b.sig', '-o', 'xxx.csv', '--containment') assert ""ERROR: cannot do containment searches on an abund signature; maybe specify --ignore-abundance?"" in str(exc) # run sourmash search --max-containment with pytest.raises(SourmashCommandFailed) as exc: runtmp.sourmash('search', 'a.sig', 'b.sig', '-o', 'xxx.csv', '--max-containment') assert ""ERROR: cannot do containment searches on an abund signature; maybe specify --ignore-abundance?"" in str(exc) ","def test_search_containment_abund(runtmp): ""Construct some signatures with abund, make sure that containment complains"" # build minhashes mh1 = MinHash(0, 21, scaled=1, track_abundance=True) mh2 = MinHash(0, 21, scaled=1, track_abundance=True) mh1.add_many((1, 2, 3, 4)) mh1.add_many((1, 2)) mh2.add_many((1, 5)) mh2.add_many((1, 5)) mh2.add_many((1, 5)) # build signatures x = sourmash.SourmashSignature(mh1, name='a') y = sourmash.SourmashSignature(mh2, name='b') # save! 
with open(runtmp.output('a.sig'), 'wt') as fp: sourmash.save_signatures([x], fp) with open(runtmp.output('b.sig'), 'wt') as fp: sourmash.save_signatures([y], fp) # run sourmash search --containment with pytest.raises(SourmashCommandFailed) as exc: runtmp.sourmash('search', 'a.sig', 'b.sig', '-o', 'xxx.csv', '--containment') assert ""ERROR: cannot do containment searches on an abund signature; maybe specify --ignore-abundance?"" in str(exc) # run sourmash search --max-containment with pytest.raises(SourmashCommandFailed) as exc: runtmp.sourmash('search', 'a.sig', 'b.sig', '-o', 'xxx.csv', '--max-containment') assert ""ERROR: cannot do containment searches on an abund signature; maybe specify --ignore-abundance?"" in str(exc) " 32776,"def traced_init(wrapped, instance, args, kwargs): mw = kwargs.pop('middleware', []) service = os.environ.get('DATADOG_SERVICE_NAME') or 'falcon' distributed_tracing = asbool(get_env('falcon', 'distributed_tracing', default=True)) mw.insert(0, TraceMiddleware(tracer, service, distributed_tracing)) kwargs['middleware'] = mw wrapped(*args, **kwargs) ","def traced_init(wrapped, instance, args, kwargs): mw = kwargs.pop('middleware', []) service = get_env(""service_name"", default=""falcon"") distributed_tracing = asbool(get_env('falcon', 'distributed_tracing', default=True)) mw.insert(0, TraceMiddleware(tracer, service, distributed_tracing)) kwargs['middleware'] = mw wrapped(*args, **kwargs) " 17437,"def mean(array, axis=None, skipna=None, **kwargs): """"""inhouse mean that can handle np.datetime64 or cftime.datetime dtypes"""""" from .common import _contains_cftime_datetimes # The mean over an empty axis shouldn't change the data # See https://github.com/pydata/xarray/issues/4885 if axis == tuple(): return array array = asarray(array) if array.dtype.kind in ""Mm"": offset = _datetime_nanmin(array) # xarray always uses np.datetime64[ns] for np.datetime64 data dtype = ""timedelta64[ns]"" return ( _mean( datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs ).astype(dtype) + offset ) elif _contains_cftime_datetimes(array): if is_duck_dask_array(array): raise NotImplementedError( ""Computing the mean of an array containing "" ""cftime.datetime objects is not yet implemented on "" ""dask arrays."" ) offset = min(array) timedeltas = datetime_to_numeric(array, offset, datetime_unit=""us"") mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs) return _to_pytimedelta(mean_timedeltas, unit=""us"") + offset else: return _mean(array, axis=axis, skipna=skipna, **kwargs) ","def mean(array, axis=None, skipna=None, **kwargs): """"""inhouse mean that can handle np.datetime64 or cftime.datetime dtypes"""""" from .common import _contains_cftime_datetimes # The mean over an empty axis shouldn't change the dtype # See https://github.com/pydata/xarray/issues/4885 if axis == tuple(): return array array = asarray(array) if array.dtype.kind in ""Mm"": offset = _datetime_nanmin(array) # xarray always uses np.datetime64[ns] for np.datetime64 data dtype = ""timedelta64[ns]"" return ( _mean( datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs ).astype(dtype) + offset ) elif _contains_cftime_datetimes(array): if is_duck_dask_array(array): raise NotImplementedError( ""Computing the mean of an array containing "" ""cftime.datetime objects is not yet implemented on "" ""dask arrays."" ) offset = min(array) timedeltas = datetime_to_numeric(array, offset, datetime_unit=""us"") mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, 
**kwargs) return _to_pytimedelta(mean_timedeltas, unit=""us"") + offset else: return _mean(array, axis=axis, skipna=skipna, **kwargs) " 8749,"def test_configparser_multi_lines(multi_fakeconfig): # spam assert multi_fakeconfig.spam.eggs == [ 'one', 'two', 'three', 'four', 'and a half', # no-breakline + comma ], 'Comma separated line: ""four"" and ""and a half"" must be separated' assert multi_fakeconfig.spam.bacons == [ 'grilled', 'burn out', 'greasy, fat, and tasty', ] assert multi_fakeconfig.spam.cheese == [ 'cheddar', 'reblochon', 'camembert', ] ","def test_configparser_multi_lines(multi_fakeconfig): # spam assert multi_fakeconfig.spam.eggs == [ 'one', 'two', 'three', 'four', 'and a half', # no-newline + comma ], 'Comma separated line: ""four"" and ""and a half"" must be separated' assert multi_fakeconfig.spam.bacons == [ 'grilled', 'burn out', 'greasy, fat, and tasty', ] assert multi_fakeconfig.spam.cheese == [ 'cheddar', 'reblochon', 'camembert', ] " 20552,"def test_splitext(): assert msct_image.splitext('image.nii') == ('image', '.nii') assert msct_image.splitext('image.nii.gz') == ('image', '.nii.gz') assert msct_image.splitext('folder/image.nii.gz') == (os.path.join('folder', 'image'), '.nii.gz') assert msct_image.splitext('nice.image.nii.gz') == ('nice.image', '.nii.gz') assert msct_image.splitext('nice.folder/image.nii.gz') == (os.path.join('nice.folder', 'image'), '.nii.gz') assert msct_image.splitext('image.tar.gz') == ('image', '.tar.gz') ","def test_splitext(): assert msct_image.splitext('image.nii') == ('image', '.nii') assert msct_image.splitext('image.nii.gz') == ('image', '.nii.gz') assert msct_image.splitext(os.path.join('folder', 'image.nii.gz')) == (os.path.join('folder', 'image'), '.nii.gz') assert msct_image.splitext('nice.image.nii.gz') == ('nice.image', '.nii.gz') assert msct_image.splitext('nice.folder/image.nii.gz') == (os.path.join('nice.folder', 'image'), '.nii.gz') assert msct_image.splitext('image.tar.gz') == ('image', '.tar.gz') " 26399,"def generate_config(context): """""" Entry point for the deployment resources. """""" project_id = context.env['project'] resources = [] i = 0 for role in context.properties['roles']: for member in role['members']: policy_get_name = 'get-iam-policy-{}-{}'.format(project_id, i) resources.append( { 'name': policy_get_name, 'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding', 'properties': { 'resource': project_id, 'role': role['role'], 'member': member } } ) i += 1 return {""resources"": resources} ","def generate_config(context): """""" Entry point for the deployment resources. 
"""""" project_id = context.env['project'] resources = [] i = 0 for role in context.properties['roles']: for i, member in enumerate(role['members']): policy_get_name = 'get-iam-policy-{}-{}'.format(project_id, i) resources.append( { 'name': policy_get_name, 'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding', 'properties': { 'resource': project_id, 'role': role['role'], 'member': member } } ) i += 1 return {""resources"": resources} " 29576,"def test_serialize_bytes(): for x in [1, ""abc"", np.arange(5), b""ab"" * int(100e6)]: b = serialize_bytes(x) assert isinstance(b, bytes) y = deserialize_bytes(b) assert str(x) == str(y) ","def test_serialize_bytes(): for x in [1, ""abc"", np.arange(5), b""ab"" * int(40e6)]: b = serialize_bytes(x) assert isinstance(b, bytes) y = deserialize_bytes(b) assert str(x) == str(y) " 22187,"def strip_control_characters(s): """"""Strip unicode control characters from a string."""""" return """".join(c for c in unicodify(s) if unicodedata.category(c)[0] != ""C"" or unicodedata.category(c) == ""Cn"") ","def strip_control_characters(s): """"""Strip unicode control characters from a string."""""" return """".join(c for c in unicodify(s) if unicodedata.category(c) != ""Cc"") " 42563,"def test_query_token_with_info(rotkehlchen_api_server): """"""Query DAI token to retrieve basic information"""""" response = requests.get( api_url_for( rotkehlchen_api_server, ""erc20tokeninfo"", ), json={ 'address': string_to_ethereum_address(""0x6B175474E89094C44Da98b954EedeAC495271d0F""), }, ) assert_proper_response_with_result(response) data = response.json() assert data['result']['decimals'] == 18 assert data['result']['symbol'] == 'DAI' assert data['result']['name'] == 'Dai Stablecoin' ","def test_query_token_with_info(rotkehlchen_api_server): """"""Query DAI token to retrieve basic information"""""" response = requests.get( api_url_for( rotkehlchen_api_server, 'erc20tokeninfo', ), json={ 'address': string_to_ethereum_address(""0x6B175474E89094C44Da98b954EedeAC495271d0F""), }, ) assert_proper_response_with_result(response) data = response.json() assert data['result']['decimals'] == 18 assert data['result']['symbol'] == 'DAI' assert data['result']['name'] == 'Dai Stablecoin' " 10665,"def bokeh_dataframe(name, rawtext, text, lineno, inliner, options=None, content=None): """"""Generate an inline visual representations of a single color palette. If evaluating the dataframe HTML repr fails, then a SphinxError is raised to terminate the build. For details on the arguments to this function, consult the Docutils docs: http://docutils.sourceforge.net/docs/howto/rst-roles.html#define-the-role-function """""" module_name, df_name = text.rsplit(""."", 1) try: module = importlib.import_module(module_name) except ImportError: raise SphinxError(f""Unable to generate HTML table for {df_name}: couldn't import module {module_name}"") df = getattr(module, df_name, None) if df is None: raise SphinxError(f""Unable to generate HTML table for {df_name}: no Dataframe {df_name} in module {module_name}"") if not isinstance(df, pd.DataFrame): raise SphinxError(f""{text!r} is not a pandas Dataframe"") node = nodes.raw("""", df.head().to_html(), format=""html"") return [node], [] ","def bokeh_dataframe(name, rawtext, text, lineno, inliner, options=None, content=None): """"""Generate an inline visual representation of a single color palette. If evaluating the dataframe HTML repr fails, then a SphinxError is raised to terminate the build. 
For details on the arguments to this function, consult the Docutils docs: http://docutils.sourceforge.net/docs/howto/rst-roles.html#define-the-role-function """""" module_name, df_name = text.rsplit(""."", 1) try: module = importlib.import_module(module_name) except ImportError: raise SphinxError(f""Unable to generate HTML table for {df_name}: couldn't import module {module_name}"") df = getattr(module, df_name, None) if df is None: raise SphinxError(f""Unable to generate HTML table for {df_name}: no Dataframe {df_name} in module {module_name}"") if not isinstance(df, pd.DataFrame): raise SphinxError(f""{text!r} is not a pandas Dataframe"") node = nodes.raw("""", df.head().to_html(), format=""html"") return [node], [] " 42820,"def move_git_folder_to_path(source_path, new_path, made_new_config): """""" Moves git folder and .gitignore to the new backup directory. """""" git_dir = os.path.join(source_path, '.git') git_ignore_file = os.path.join(source_path, '.gitignore') try: shutil.move(git_dir, new_path) shutil.move(git_ignore_file, new_path) print(Fore.BLUE + ""Moving git repo to new destination"" + Style.RESET_ALL) except FileNotFoundError as e: if not made_new_config: print(Fore.RED + Style.NORMAL + ""Could not detect {}... will initialize"".format(e.filename) + Style.RESET_ALL) ","def move_git_folder_to_path(source_path, new_path, made_new_config): """""" Moves git folder and .gitignore to the new backup directory. """""" git_dir = os.path.join(source_path, '.git') git_ignore_file = os.path.join(source_path, '.gitignore') try: shutil.move(git_dir, new_path) shutil.move(git_ignore_file, new_path) print(Fore.BLUE + Style.BRIGHT + ""Moving git repo to new destination..."" + Style.RESET_ALL) except FileNotFoundError as e: if not made_new_config: print(Fore.RED + Style.NORMAL + ""Could not detect {}... will initialize"".format(e.filename) + Style.RESET_ALL) " 42104,"def upgrade(): bind = op.get_bind() sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True) # MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE # ADD COLUMN ... DEFAULT ""FINITE_OR_NAN""', but seemingly Alembic # does not support such a SQL statement. So first add a column with schema-level # default value setting, then remove it by `batch_op.alter_column()`. 
with op.batch_alter_table(""trial_intermediate_values"") as batch_op: batch_op.add_column( sa.Column( ""intermediate_value_type"", sa.Enum(""FINITE"", ""INF_POS"", ""INF_NEG"", ""NAN"", name=""floattypeenum""), nullable=False, server_default=""FINITE"", ), ) with op.batch_alter_table(""trial_intermediate_values"") as batch_op: batch_op.alter_column(""intermediate_value_type"", server_default=None) session = orm.Session(bind=bind) try: records = session.query(IntermediateValueModel).all() mapping = [] for r in records: value: float if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf( r.intermediate_value ): value = np.inf elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf( r.intermediate_value ): value = -np.inf elif np.isnan(r.intermediate_value): value = np.nan else: value = r.intermediate_value ( sanitized_value, float_type, ) = IntermediateValueModel._intermediate_value_to_stored_repr(value) mapping.append( { ""trial_intermediate_value_id"": r.trial_intermediate_value_id, ""intermediate_value_type"": float_type, ""intermediate_value"": sanitized_value, } ) session.bulk_update_mappings(IntermediateValueModel, mapping) session.commit() except SQLAlchemyError as e: session.rollback() raise e finally: session.close() ","def upgrade(): bind = op.get_bind() sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True) # MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE # ADD COLUMN ... DEFAULT ""FINITE_OR_NAN""', but seemingly Alembic # does not support such a SQL statement. So first add a column with schema-level # default value setting, then remove it by `batch_op.alter_column()`. with op.batch_alter_table(""trial_intermediate_values"") as batch_op: batch_op.add_column( sa.Column( ""intermediate_value_type"", sa.Enum(""FINITE"", ""INF_POS"", ""INF_NEG"", ""NAN"", name=""trialintermediatevaluetype""), nullable=False, server_default=""FINITE"", ), ) with op.batch_alter_table(""trial_intermediate_values"") as batch_op: batch_op.alter_column(""intermediate_value_type"", server_default=None) session = orm.Session(bind=bind) try: records = session.query(IntermediateValueModel).all() mapping = [] for r in records: value: float if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf( r.intermediate_value ): value = np.inf elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf( r.intermediate_value ): value = -np.inf elif np.isnan(r.intermediate_value): value = np.nan else: value = r.intermediate_value ( sanitized_value, float_type, ) = IntermediateValueModel._intermediate_value_to_stored_repr(value) mapping.append( { ""trial_intermediate_value_id"": r.trial_intermediate_value_id, ""intermediate_value_type"": float_type, ""intermediate_value"": sanitized_value, } ) session.bulk_update_mappings(IntermediateValueModel, mapping) session.commit() except SQLAlchemyError as e: session.rollback() raise e finally: session.close() " 58045,"def remove_space_from_args(args): """"""Remove space from args."""""" for arg in args: if isinstance(args[arg], str): args[arg] = args[arg].strip() return args ","def remove_space_from_args(args): """"""Remove space from args."""""" for key in args.keys(): if isinstance(args[key], str): args[key] = args[key].strip() return args " 57578,"def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): assert shared_memory is None env = env_fn() parent_pipe.close() try: while True: command, data = pipe.recv() if command == ""reset"": observation = env.reset() 
pipe.send((observation, True)) elif command == ""step"": observation, reward, done, info = env.step(data) if done: observation = env.reset() pipe.send(((observation, reward, done, info), True)) elif command == ""seed"": env.seed(data) pipe.send((None, True)) elif command == ""close"": pipe.send((None, True)) break elif command == ""_call"": name, args, kwargs = data if name in [""reset"", ""step"", ""seed"", ""close""]: raise ValueError( ""Trying to call function `{0}` with "" ""`_call`. Use `{0}` directly instead."".format(name) ) function = getattr(env, name) if callable(function): pipe.send((function(*args, **kwargs), True)) else: pipe.send((function, True)) elif command == ""_setattr"": name, value = data setattr(env, name, value) pipe.send((None, True)) elif command == ""_check_observation_space"": pipe.send((data == env.observation_space, True)) else: raise RuntimeError( ""Received unknown command `{0}`. Must "" ""be one of {`reset`, `step`, `seed`, `close`, `_call`, "" ""`_setattr`, `_check_observation_space`}."".format(command) ) except (KeyboardInterrupt, Exception): error_queue.put((index,) + sys.exc_info()[:2]) pipe.send((None, False)) finally: env.close() ","def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): assert shared_memory is None env = env_fn() parent_pipe.close() try: while True: command, data = pipe.recv() if command == ""reset"": observation = env.reset() pipe.send((observation, True)) elif command == ""step"": observation, reward, done, info = env.step(data) if done: observation = env.reset() pipe.send(((observation, reward, done, info), True)) elif command == ""seed"": env.seed(data) pipe.send((None, True)) elif command == ""close"": pipe.send((None, True)) break elif command == ""_call"": name, args, kwargs = data if name in [""reset"", ""step"", ""seed"", ""close""]: raise ValueError( f""Trying to call function `{name}` with "" f""`_call`. Use `{name}` directly instead."" ) function = getattr(env, name) if callable(function): pipe.send((function(*args, **kwargs), True)) else: pipe.send((function, True)) elif command == ""_setattr"": name, value = data setattr(env, name, value) pipe.send((None, True)) elif command == ""_check_observation_space"": pipe.send((data == env.observation_space, True)) else: raise RuntimeError( ""Received unknown command `{0}`. 
Must "" ""be one of {`reset`, `step`, `seed`, `close`, `_call`, "" ""`_setattr`, `_check_observation_space`}."".format(command) ) except (KeyboardInterrupt, Exception): error_queue.put((index,) + sys.exc_info()[:2]) pipe.send((None, False)) finally: env.close() " 11367,"def override_properties_with_keyword_arguments(properties, **kwargs): # type: (PropertiesType, Any) -> None if not kwargs: return for key, _ in kwargs: if key in properties: properties[key] = kwargs.get(key) ","def override_properties_with_keyword_arguments(properties, **kwargs): # type: (PropertiesType, Any) -> None if not kwargs: return for key in kwargs.keys(): if key in properties: properties[key] = kwargs.get(key) " 55408,"def _get_mlflow_install_step(dockerfile_context_dir, mlflow_home): """""" Get docker build commands for installing MLflow given a Docker context dir and optional source directory """""" if mlflow_home: mlflow_dir = _copy_project(src_path=mlflow_home, dst_path=dockerfile_context_dir) return ( ""COPY {mlflow_dir} /opt/mlflow\n"" ""RUN pip install /opt/mlflow\n"" # Temporarily commented out for faster development # ""RUN cd /opt/mlflow/mlflow/java/scoring && "" # ""mvn --batch-mode package -DskipTests && "" # ""mkdir -p /opt/java/jars && "" # ""mv /opt/mlflow/mlflow/java/scoring/target/"" # ""mlflow-scoring-*-with-dependencies.jar /opt/java/jars\n"" ).format(mlflow_dir=mlflow_dir) else: return ( ""RUN pip install mlflow=={version}\n"" # Temporarily commented out for faster development # ""RUN mvn "" # "" --batch-mode dependency:copy"" # "" -Dartifact=org.mlflow:mlflow-scoring:{version}:pom"" # "" -DoutputDirectory=/opt/java\n"" # ""RUN mvn "" # "" --batch-mode dependency:copy"" # "" -Dartifact=org.mlflow:mlflow-scoring:{version}:jar"" # "" -DoutputDirectory=/opt/java/jars\n"" # ""RUN cp /opt/java/mlflow-scoring-{version}.pom /opt/java/pom.xml\n"" # ""RUN cd /opt/java && mvn "" # ""--batch-mode dependency:copy-dependencies -DoutputDirectory=/opt/java/jars\n"" ).format(version=mlflow.version.VERSION) ","def _get_mlflow_install_step(dockerfile_context_dir, mlflow_home): """""" Get docker build commands for installing MLflow given a Docker context dir and optional source directory """""" if mlflow_home: mlflow_dir = _copy_project(src_path=mlflow_home, dst_path=dockerfile_context_dir) return ( ""COPY {mlflow_dir} /opt/mlflow\n"" ""RUN pip install /opt/mlflow\n"" ""RUN cd /opt/mlflow/mlflow/java/scoring && "" ""mvn --batch-mode package -DskipTests && "" ""mkdir -p /opt/java/jars && "" ""mv /opt/mlflow/mlflow/java/scoring/target/"" ""mlflow-scoring-*-with-dependencies.jar /opt/java/jars\n"" ).format(mlflow_dir=mlflow_dir) else: return ( ""RUN pip install mlflow=={version}\n"" # Temporarily commented out for faster development # ""RUN mvn "" # "" --batch-mode dependency:copy"" # "" -Dartifact=org.mlflow:mlflow-scoring:{version}:pom"" # "" -DoutputDirectory=/opt/java\n"" # ""RUN mvn "" # "" --batch-mode dependency:copy"" # "" -Dartifact=org.mlflow:mlflow-scoring:{version}:jar"" # "" -DoutputDirectory=/opt/java/jars\n"" # ""RUN cp /opt/java/mlflow-scoring-{version}.pom /opt/java/pom.xml\n"" # ""RUN cd /opt/java && mvn "" # ""--batch-mode dependency:copy-dependencies -DoutputDirectory=/opt/java/jars\n"" ).format(version=mlflow.version.VERSION) " 57714,"def get_reports_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]: report_id_list = argToList(args.get('report_ids', [])) extended = args.get('extended_report', ""False"") screenshot = args.get('get_screenshot', ""false"") artifact = 
args.get('get_artifact', """") if len(report_id_list) == 0: raise ValueError('report_id(s) not specified') report_list: List[Dict[str, Any]] = [] for report_id in report_id_list: report = client.report_status(report_id=report_id, extended=extended) if screenshot.lower() == ""true"": screenshot = client.report_artifact(report_id=report_id, artifact_type=""screenshot"") stored_img = fileResult('screenshot.gif', screenshot) demisto.results({'Type': entryTypes['image'], 'ContentsFormat': formats['text'], 'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': ''}) if artifact != """": artifacts = client.report_artifact(report_id=report_id, artifact_type=artifact) stored_artifacts = fileResult('artifacts.zip', artifacts) demisto.results({'Type': entryTypes['file'], 'ContentsFormat': formats['text'], 'File': stored_artifacts['File'], 'FileID': stored_artifacts['FileID'], 'Contents': ''}) report_list.append(report) readable_output = tableToMarkdown('Scan status', report_list) outputs = { 'FireEyeDoD.Scan(val.report_id == obj.report_id)': report_list } return ( readable_output, outputs, report_list ) ","def get_reports_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]: report_id_list = argToList(args.get('report_ids', [])) extended = args.get('extended_report', ""False"") screenshot = args.get('get_screenshot', ""false"") artifact = args.get('get_artifact', """") if len(report_id_list) == 0: raise ValueError('report_id(s) not specified') report_list: List[Dict[str, Any]] = [] for report_id in report_id_list: report = client.report_status(report_id=report_id, extended=extended) if screenshot.lower() == ""true"": screenshot = client.report_artifact(report_id=report_id, artifact_type=""screenshot"") stored_img = fileResult('screenshot.gif', screenshot) demisto.results({'Type': entryTypes['image'], 'ContentsFormat': formats['text'], 'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': ''}) if artifact != """": artifacts = client.report_artifact(report_id=report_id, artifact_type=artifact) stored_artifacts = fileResult('artifacts.zip', artifacts, entryTypes['entryInfoFile']) demisto.results(stored_artifacts) demisto.results({'Type': entryTypes['file'], 'ContentsFormat': formats['text'], 'File': stored_artifacts['File'], 'FileID': stored_artifacts['FileID'], 'Contents': ''}) report_list.append(report) readable_output = tableToMarkdown('Scan status', report_list) outputs = { 'FireEyeDoD.Scan(val.report_id == obj.report_id)': report_list } return ( readable_output, outputs, report_list ) " 5434,"def test_load_json(): env = Environment(extensions=[SerializerExtension]) rendered = env.from_string( '{% set document = \'{""foo"": ""it works""}\'|load_json %}{{ document.foo }}' ).render() assert rendered == ""it works"" rendered = env.from_string( ""{% set document = document|load_json %}{{ document.foo }}"" ).render(document='{""foo"": ""it works""}') assert rendered == ""it works"" # bad quotes with pytest.raises(exceptions.TemplateRuntimeError): env.from_string(""{{ document|load_json }}"").render( document=""{'foo': 'it works'}"" ) # not a string with pytest.raises(exceptions.TemplateRuntimeError): env.from_string(""{{ document|load_json }}"").render(document={""foo"": ""it works""}) ","def test_load_json(): env = Environment(extensions=[SerializerExtension]) rendered = env.from_string( """"""{% set document = '{""foo"": ""it works""}'|load_json %}{{ document.foo }}"""""" ).render() assert rendered == ""it works"" rendered = env.from_string( ""{% set 
document = document|load_json %}{{ document.foo }}"" ).render(document='{""foo"": ""it works""}') assert rendered == ""it works"" # bad quotes with pytest.raises(exceptions.TemplateRuntimeError): env.from_string(""{{ document|load_json }}"").render( document=""{'foo': 'it works'}"" ) # not a string with pytest.raises(exceptions.TemplateRuntimeError): env.from_string(""{{ document|load_json }}"").render(document={""foo"": ""it works""}) " 33323,"def account_download_filter(account_type, download_table, filters, account_level=""treasury_account""): if account_level not in (""treasury_account"", ""federal_account""): raise InvalidParameterException( 'Invalid Parameter: account_level must be either ""federal_account"" or ""treasury_account""' ) query_filters = {} tas_id = ""treasury_account_identifier"" if account_type == ""account_balances"" else ""treasury_account"" if filters.get(""agency"") and filters[""agency""] != ""all"": if not ToptierAgency.objects.filter(toptier_agency_id=filters[""agency""]).exists(): raise InvalidParameterException(""Agency with that ID does not exist"") query_filters[f""{tas_id}__funding_toptier_agency_id""] = filters[""agency""] if filters.get(""federal_account"") and filters[""federal_account""] != ""all"": if not FederalAccount.objects.filter(id=filters[""federal_account""]).exists(): raise InvalidParameterException(""Federal Account with that ID does not exist"") query_filters[f""{tas_id}__federal_account__id""] = filters[""federal_account""] if filters.get(""budget_function"") and filters[""budget_function""] != ""all"": query_filters[f""{tas_id}__budget_function_code""] = filters[""budget_function""] if filters.get(""budget_subfunction"") and filters[""budget_subfunction""] != ""all"": query_filters[f""{tas_id}__budget_subfunction_code""] = filters[""budget_subfunction""] if account_type != ""account_balances"": # file A does not have DEFC field so we do not attempt to filter if filters.get(""def_codes"") and len(filters.get(""def_codes"")) > 0: query_filters[""disaster_emergency_fund__code__in""] = filters[""def_codes""] submission_filter = get_submission_filter(account_type, filters) # Make derivations based on the account level if account_level == ""treasury_account"": queryset = generate_treasury_account_query(download_table.objects, account_type, tas_id, filters) elif account_level == ""federal_account"": queryset = generate_federal_account_query(download_table.objects, account_type, tas_id, filters) else: raise InvalidParameterException( 'Invalid Parameter: account_level must be either ""federal_account"" or ""treasury_account""' ) # Apply filter and return return queryset.filter(submission_filter, **query_filters) ","def account_download_filter(account_type, download_table, filters, account_level=""treasury_account""): if account_level not in (""treasury_account"", ""federal_account""): raise InvalidParameterException( 'Invalid Parameter: account_level must be either ""federal_account"" or ""treasury_account""' ) query_filters = {} tas_id = ""treasury_account_identifier"" if account_type == ""account_balances"" else ""treasury_account"" if filters.get(""agency"") and filters[""agency""] != ""all"": if not ToptierAgency.objects.filter(toptier_agency_id=filters[""agency""]).exists(): raise InvalidParameterException(""Agency with that ID does not exist"") query_filters[f""{tas_id}__funding_toptier_agency_id""] = filters[""agency""] if filters.get(""federal_account"") and filters[""federal_account""] != ""all"": if not 
FederalAccount.objects.filter(id=filters[""federal_account""]).exists(): raise InvalidParameterException(""Federal Account with that ID does not exist"") query_filters[f""{tas_id}__federal_account__id""] = filters[""federal_account""] if filters.get(""budget_function"") and filters[""budget_function""] != ""all"": query_filters[f""{tas_id}__budget_function_code""] = filters[""budget_function""] if filters.get(""budget_subfunction"") and filters[""budget_subfunction""] != ""all"": query_filters[f""{tas_id}__budget_subfunction_code""] = filters[""budget_subfunction""] if account_type != ""account_balances"": # file A does not have DEFC field so we do not attempt to filter if filters.get(""def_codes"") and len(filters.get(""def_codes"")) > 0: query_filters[""disaster_emergency_fund_code__in""] = filters[""def_codes""] submission_filter = get_submission_filter(account_type, filters) # Make derivations based on the account level if account_level == ""treasury_account"": queryset = generate_treasury_account_query(download_table.objects, account_type, tas_id, filters) elif account_level == ""federal_account"": queryset = generate_federal_account_query(download_table.objects, account_type, tas_id, filters) else: raise InvalidParameterException( 'Invalid Parameter: account_level must be either ""federal_account"" or ""treasury_account""' ) # Apply filter and return return queryset.filter(submission_filter, **query_filters) " 57750,"def url_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]: urls = argToList(args.get('url')) if len(urls) == 0: raise ValueError('URL(s) not specified') command_results: List[CommandResults] = [] for url in urls: url_data = client.get_url_reputation(url, api_key) indicator_url = url_data['indicator'] reputation = url_data['risk'] score = convert_to_xsoar_severity(reputation) dbot_score = Common.DBotScore( indicator=str(indicator_url), indicator_type=DBotScoreType.URL, integration_name='Pulsedive', score=score, malicious_description=f'Pulsedive returned reputation {reputation}' ) url_standard_context = Common.URL( url=indicator_url, dbot_score=dbot_score ) url_data.pop('objects') url_data.pop('nir') command_results.append(CommandResults( readable_output=tableToMarkdown('URL List', url_data), outputs_prefix='Pulsedive.URL', outputs_key_field='indicator', outputs=url_data, indicator=url_standard_context )) return command_results ","def url_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]: urls = argToList(args.get('url')) if len(urls) == 0: raise ValueError('URL(s) not specified') command_results: List[CommandResults] = [] for url in urls: url_data = client.get_url_reputation(url, api_key) indicator_url = url_data['indicator'] reputation = url_data['risk'] score = convert_to_xsoar_severity(reputation) dbot_score = Common.DBotScore( indicator=str(indicator_url), indicator_type=DBotScoreType.URL, integration_name='Pulsedive', score=score, malicious_description=f'Pulsedive returned reputation {reputation}' ) url_standard_context = Common.URL( url=indicator_url, dbot_score=dbot_score ) url_data.pop('objects', None) url_data.pop('nir', None) command_results.append(CommandResults( readable_output=tableToMarkdown('URL List', url_data), outputs_prefix='Pulsedive.URL', outputs_key_field='indicator', outputs=url_data, indicator=url_standard_context )) return command_results " 45834,"def warp_perspective( src: torch.Tensor, M: torch.Tensor, dsize: Tuple[int, int], mode: str = 'bilinear', padding_mode: str 
= 'zeros', align_corners: Optional[bool] = None, normalized_homography: bool = False, normalized_coordinates: bool = True, ) -> torch.Tensor: r""""""Applies a perspective transformation to an image. The function warp_perspective transforms the source image using the specified matrix: .. math:: \text{dst} (x, y) = \text{src} \left( \frac{M^{-1}_{11} x + M^{-1}_{12} y + M^{-1}_{13}}{M^{-1}_{31} x + M^{-1}_{32} y + M^{-1}_{33}} , \frac{M^{-1}_{21} x + M^{-1}_{22} y + M^{-1}_{23}}{M^{-1}_{31} x + M^{-1}_{32} y + M^{-1}_{33}} \right ) Args: src (torch.Tensor): input image with shape :math:`(B, C, H, W)`. M (torch.Tensor): transformation matrix with shape :math:`(B, 3, 3)`. dsize (tuple): size of the output image (height, width). mode (str): interpolation mode to calculate output values 'bilinear' | 'nearest'. Default: 'bilinear'. padding_mode (str): padding mode for outside grid values 'zeros' | 'border' | 'reflection'. Default: 'zeros'. align_corners(bool, optional): interpolation flag. Default: None. normalized_homography(bool): Warp image patchs or tensors by normalized 2D homographies. See :class:`~kornia.geometry.warp.HomographyWarper` for details. normalized_coordinates (bool): Whether the homography assumes [-1, 1] normalized coordinates or not. Returns: torch.Tensor: the warped input image :math:`(B, C, H, W)`. Example: >>> img = torch.rand(1, 4, 5, 6) >>> H = torch.eye(3)[None] >>> out = warp_perspective(img, H, (4, 2), align_corners=True) >>> print(out.shape) torch.Size([1, 4, 4, 2]) .. note:: This function is often used in conjuntion with :func:`get_perspective_transform`. .. note:: See a working example `here `_. """""" if not isinstance(src, torch.Tensor): raise TypeError(""Input src type is not a torch.Tensor. Got {}"".format(type(src))) if not isinstance(M, torch.Tensor): raise TypeError(""Input M type is not a torch.Tensor. Got {}"".format(type(M))) if not len(src.shape) == 4: raise ValueError(""Input src must be a BxCxHxW tensor. Got {}"".format(src.shape)) if not (len(M.shape) == 3 and M.shape[-2:] == (3, 3)): raise ValueError(""Input M must be a Bx3x3 tensor. Got {}"".format(M.shape)) if not normalized_homography: # TODO: remove the statement below in kornia v0.6 if align_corners is None: message: str = ( ""The align_corners default value has been changed. By default now is set True "" ""in order to match cv2.warpPerspective. In case you want to keep your previous "" ""behaviour set it to False. This warning will disappear in kornia > v0.6."" ) warnings.warn(message) # set default value for align corners align_corners = True B, C, H, W = src.size() h_out, w_out = dsize # we normalize the 3x3 transformation matrix and convert to 3x4 dst_norm_trans_src_norm: torch.Tensor = normalize_homography(M, (H, W), (h_out, w_out)) # Bx3x3 src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3 # this piece of code substitutes F.affine_grid since it does not support 3x3 grid = ( create_meshgrid(h_out, w_out, normalized_coordinates=normalized_coordinates, device=src.device) .to(src.dtype) .repeat(B, 1, 1, 1) ) grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid) else: if align_corners is None: align_corners = False if not M.device == src.device: raise TypeError( ""Patch and homography must be on the same device. 
\ Got patch.device: {} M.device: {}."".format( src.device, M.device ) ) height, width = dsize grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates) grid = warp_grid(grid, M) return F.grid_sample(src, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners) ","def warp_perspective( src: torch.Tensor, M: torch.Tensor, dsize: Tuple[int, int], mode: str = 'bilinear', padding_mode: str = 'zeros', align_corners: Optional[bool] = None, normalized_homography: bool = False, normalized_coordinates: bool = True, ) -> torch.Tensor: r""""""Applies a perspective transformation to an image. The function warp_perspective transforms the source image using the specified matrix: .. math:: \text{dst} (x, y) = \text{src} \left( \frac{M^{-1}_{11} x + M^{-1}_{12} y + M^{-1}_{13}}{M^{-1}_{31} x + M^{-1}_{32} y + M^{-1}_{33}} , \frac{M^{-1}_{21} x + M^{-1}_{22} y + M^{-1}_{23}}{M^{-1}_{31} x + M^{-1}_{32} y + M^{-1}_{33}} \right ) Args: src (torch.Tensor): input image with shape :math:`(B, C, H, W)`. M (torch.Tensor): transformation matrix with shape :math:`(B, 3, 3)`. dsize (tuple): size of the output image (height, width). mode (str): interpolation mode to calculate output values 'bilinear' | 'nearest'. Default: 'bilinear'. padding_mode (str): padding mode for outside grid values 'zeros' | 'border' | 'reflection'. Default: 'zeros'. align_corners(bool, optional): interpolation flag. Default: None. normalized_homography(bool): Warp image patchs or tensors by normalized 2D homographies. See :class:`~kornia.geometry.warp.HomographyWarper` for details. normalized_coordinates (bool): Whether the homography assumes [-1, 1] normalized coordinates or not. Returns: torch.Tensor: the warped input image :math:`(B, C, H, W)`. Example: >>> img = torch.rand(1, 4, 5, 6) >>> H = torch.eye(3)[None] >>> out = warp_perspective(img, H, (4, 2), align_corners=True) >>> print(out.shape) torch.Size([1, 4, 4, 2]) .. note:: This function is often used in conjuntion with :func:`get_perspective_transform`. .. note:: See a working example `here `_. """""" if not isinstance(src, torch.Tensor): raise TypeError(""Input src type is not a torch.Tensor. Got {}"".format(type(src))) if not isinstance(M, torch.Tensor): raise TypeError(""Input M type is not a torch.Tensor. Got {}"".format(type(M))) if not len(src.shape) == 4: raise ValueError(""Input src must be a BxCxHxW tensor. Got {}"".format(src.shape)) if not (len(M.shape) == 3 and M.shape[-2:] == (3, 3)): raise ValueError(""Input M must be a Bx3x3 tensor. Got {}"".format(M.shape)) if not normalized_homography: # TODO: remove the statement below in kornia v0.6 if align_corners is None: message: str = ( ""The align_corners default value has been changed. By default now is set True "" ""in order to match cv2.warpPerspective. In case you want to keep your previous "" ""behaviour set it to False. 
This warning will disappear in kornia > v0.6."" ) warnings.warn(message) # set default value for align corners align_corners = True B, C, H, W = src.size() h_out, w_out = dsize # we normalize the 3x3 transformation matrix and convert to 3x4 dst_norm_trans_src_norm: torch.Tensor = normalize_homography(M, (H, W), (h_out, w_out)) # Bx3x3 src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3 # this piece of code substitutes F.affine_grid since it does not support 3x3 grid = ( create_meshgrid(h_out, w_out, normalized_coordinates=normalized_coordinates, device=src.device, dtype=src.dtype) .to(src.dtype) .repeat(B, 1, 1, 1) ) grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid) else: if align_corners is None: align_corners = False if not M.device == src.device: raise TypeError( ""Patch and homography must be on the same device. \ Got patch.device: {} M.device: {}."".format( src.device, M.device ) ) height, width = dsize grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates) grid = warp_grid(grid, M) return F.grid_sample(src, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners) " 5409,"def test_copy(): """""" Test if the source file exists on the system, copy it to the named file. """""" name = ""/tmp/salt"" source = ""/tmp/salt/salt"" user = ""salt"" group = ""saltstack"" ret = {""name"": name, ""result"": False, ""comment"": """", ""changes"": {}} comt = ""Must provide name to file.copy"" ret.update({""comment"": comt, ""name"": """"}) assert filestate.copy_("""", source) == ret mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) mock_uid = MagicMock(side_effect=["""", ""1000"", ""1000""]) mock_gid = MagicMock(side_effect=["""", ""1000"", ""1000""]) mock_user = MagicMock(return_value=user) mock_grp = MagicMock(return_value=group) mock_io = MagicMock(side_effect=IOError) with patch.object(os.path, ""isabs"", mock_f): comt = ""Specified file {} is not an absolute path"".format(name) ret.update({""comment"": comt, ""name"": name}) assert filestate.copy_(name, source) == ret with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""exists"", mock_f): comt = 'Source file ""{}"" is not present'.format(source) ret.update({""comment"": comt, ""result"": False}) assert filestate.copy_(name, source) == ret with patch.object(os.path, ""exists"", mock_t): with patch.dict( filestate.__salt__, { ""file.user_to_uid"": mock_uid, ""file.group_to_gid"": mock_gid, ""file.get_user"": mock_user, ""file.get_group"": mock_grp, ""file.get_mode"": mock_grp, ""file.check_perms"": mock_t, }, ): # Group argument is ignored on Windows systems. 
Group is set # to user if salt.utils.platform.is_windows(): comt = ""User salt is not available Group salt"" "" is not available"" else: comt = ( ""User salt is not available Group saltstack"" "" is not available"" ) ret.update({""comment"": comt, ""result"": False}) assert filestate.copy_(name, source, user=user, group=group) == ret comt1 = ( 'Failed to delete ""{}"" in preparation for' "" forced move"".format(name) ) comt2 = ( 'The target file ""{}"" exists and will not be ' ""overwritten"".format(name) ) comt3 = 'File ""{}"" is set to be copied to ""{}""'.format(source, name) with patch.object(os.path, ""isdir"", mock_f): with patch.object(os.path, ""lexists"", mock_t): with patch.dict(filestate.__opts__, {""test"": False}): with patch.dict( filestate.__salt__, {""file.remove"": mock_io} ): ret.update({""comment"": comt1, ""result"": False}) assert ( filestate.copy_( name, source, preserve=True, force=True ) == ret ) with patch.object(os.path, ""isfile"", mock_t): ret.update({""comment"": comt2, ""result"": True}) assert ( filestate.copy_(name, source, preserve=True) == ret ) with patch.object(os.path, ""lexists"", mock_f): with patch.dict(filestate.__opts__, {""test"": True}): ret.update({""comment"": comt3, ""result"": None}) assert filestate.copy_(name, source, preserve=True) == ret with patch.dict(filestate.__opts__, {""test"": False}): comt = ""The target directory /tmp is"" "" not present"" ret.update({""comment"": comt, ""result"": False}) assert filestate.copy_(name, source, preserve=True) == ret check_perms_ret = { ""name"": name, ""changes"": {}, ""comment"": [], ""result"": True, } check_perms_perms = {} if salt.utils.platform.is_windows(): mock_check_perms = MagicMock(return_value=check_perms_ret) else: mock_check_perms = MagicMock( return_value=(check_perms_ret, check_perms_perms) ) with patch.dict( filestate.__salt__, { ""file.user_to_uid"": mock_uid, ""file.group_to_gid"": mock_gid, ""file.get_user"": mock_user, ""file.get_group"": mock_grp, ""file.get_mode"": mock_grp, ""file.check_perms"": mock_check_perms, }, ): comt = 'Copied ""{}"" to ""{}""'.format(source, name) with patch.dict(filestate.__opts__, {""user"": ""salt""}): with patch.object(os.path, ""isdir"", mock_t): with patch.object(os.path, ""lexists"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.dict( filestate.__salt__, {""file.remove"": mock_io} ): with patch.object(shutil, ""copytree"", MagicMock()): group = None ret.update( { ""comment"": comt, ""result"": True, ""changes"": { ""/tmp/salt"": ""/tmp/salt/salt"" }, } ) res = filestate.copy_( name, source, group=group, preserve=False ) assert res == ret comt = 'Copied ""{}"" to ""{}""'.format(source, name) with patch.dict(filestate.__opts__, {""user"": ""salt""}): with patch.object( os.path, ""isdir"", MagicMock(side_effect=[False, True, False]) ): with patch.object(os.path, ""lexists"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.dict( filestate.__salt__, {""file.remove"": mock_io} ): with patch.object(shutil, ""copy"", MagicMock()): group = None ret.update( { ""comment"": comt, ""result"": True, ""changes"": { ""/tmp/salt"": ""/tmp/salt/salt"" }, } ) res = filestate.copy_( name, source, group=group, preserve=False ) assert res == ret ","def test_copy(): """""" Test if the source file exists on the system, copy it to the named file. 
"""""" name = ""/tmp/salt"" source = ""/tmp/salt/salt"" user = ""salt"" group = ""saltstack"" ret = {""name"": name, ""result"": False, ""comment"": """", ""changes"": {}} comt = ""Must provide name to file.copy"" ret.update({""comment"": comt, ""name"": """"}) assert filestate.copy_("""", source) == ret mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) mock_uid = MagicMock(side_effect=["""", ""1000"", ""1000""]) mock_gid = MagicMock(side_effect=["""", ""1000"", ""1000""]) mock_user = MagicMock(return_value=user) mock_grp = MagicMock(return_value=group) mock_io = MagicMock(side_effect=IOError) with patch.object(os.path, ""isabs"", mock_f): comt = ""Specified file {} is not an absolute path"".format(name) ret.update({""comment"": comt, ""name"": name}) assert filestate.copy_(name, source) == ret with patch.object(os.path, ""isabs"", mock_t): with patch.object(os.path, ""exists"", mock_f): comt = 'Source file ""{}"" is not present'.format(source) ret.update({""comment"": comt, ""result"": False}) assert filestate.copy_(name, source) == ret with patch.object(os.path, ""exists"", mock_t): with patch.dict( filestate.__salt__, { ""file.user_to_uid"": mock_uid, ""file.group_to_gid"": mock_gid, ""file.get_user"": mock_user, ""file.get_group"": mock_grp, ""file.get_mode"": mock_grp, ""file.check_perms"": mock_t, }, ): # Group argument is ignored on Windows systems. Group is set # to user if salt.utils.platform.is_windows(): comt = ""User salt is not available Group salt"" "" is not available"" else: comt = ( ""User salt is not available Group saltstack is not available"" ) ret.update({""comment"": comt, ""result"": False}) assert filestate.copy_(name, source, user=user, group=group) == ret comt1 = ( 'Failed to delete ""{}"" in preparation for' "" forced move"".format(name) ) comt2 = ( 'The target file ""{}"" exists and will not be ' ""overwritten"".format(name) ) comt3 = 'File ""{}"" is set to be copied to ""{}""'.format(source, name) with patch.object(os.path, ""isdir"", mock_f): with patch.object(os.path, ""lexists"", mock_t): with patch.dict(filestate.__opts__, {""test"": False}): with patch.dict( filestate.__salt__, {""file.remove"": mock_io} ): ret.update({""comment"": comt1, ""result"": False}) assert ( filestate.copy_( name, source, preserve=True, force=True ) == ret ) with patch.object(os.path, ""isfile"", mock_t): ret.update({""comment"": comt2, ""result"": True}) assert ( filestate.copy_(name, source, preserve=True) == ret ) with patch.object(os.path, ""lexists"", mock_f): with patch.dict(filestate.__opts__, {""test"": True}): ret.update({""comment"": comt3, ""result"": None}) assert filestate.copy_(name, source, preserve=True) == ret with patch.dict(filestate.__opts__, {""test"": False}): comt = ""The target directory /tmp is"" "" not present"" ret.update({""comment"": comt, ""result"": False}) assert filestate.copy_(name, source, preserve=True) == ret check_perms_ret = { ""name"": name, ""changes"": {}, ""comment"": [], ""result"": True, } check_perms_perms = {} if salt.utils.platform.is_windows(): mock_check_perms = MagicMock(return_value=check_perms_ret) else: mock_check_perms = MagicMock( return_value=(check_perms_ret, check_perms_perms) ) with patch.dict( filestate.__salt__, { ""file.user_to_uid"": mock_uid, ""file.group_to_gid"": mock_gid, ""file.get_user"": mock_user, ""file.get_group"": mock_grp, ""file.get_mode"": mock_grp, ""file.check_perms"": mock_check_perms, }, ): comt = 'Copied ""{}"" to ""{}""'.format(source, name) with 
patch.dict(filestate.__opts__, {""user"": ""salt""}): with patch.object(os.path, ""isdir"", mock_t): with patch.object(os.path, ""lexists"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.dict( filestate.__salt__, {""file.remove"": mock_io} ): with patch.object(shutil, ""copytree"", MagicMock()): group = None ret.update( { ""comment"": comt, ""result"": True, ""changes"": { ""/tmp/salt"": ""/tmp/salt/salt"" }, } ) res = filestate.copy_( name, source, group=group, preserve=False ) assert res == ret comt = 'Copied ""{}"" to ""{}""'.format(source, name) with patch.dict(filestate.__opts__, {""user"": ""salt""}): with patch.object( os.path, ""isdir"", MagicMock(side_effect=[False, True, False]) ): with patch.object(os.path, ""lexists"", mock_f): with patch.dict(filestate.__opts__, {""test"": False}): with patch.dict( filestate.__salt__, {""file.remove"": mock_io} ): with patch.object(shutil, ""copy"", MagicMock()): group = None ret.update( { ""comment"": comt, ""result"": True, ""changes"": { ""/tmp/salt"": ""/tmp/salt/salt"" }, } ) res = filestate.copy_( name, source, group=group, preserve=False ) assert res == ret " 40455,"def test_agg_norm(): x = torch.randn(6, 16) index = torch.tensor([0, 0, 1, 1, 1, 2]) norm = AggSubtraction() out = norm(x) assert out.shape == (6, 16) assert out.mean().item() == pytest.approx(0, abs=1e-6) out = norm(x, index) assert out.shape == (6, 16) assert out[0:2].mean().item() == pytest.approx(0, abs=1e-6) assert out[0:2].mean().item() == pytest.approx(0, abs=1e-6) ","def test_agg_norm(): x = torch.randn(6, 16) index = torch.tensor([0, 0, 1, 1, 1, 2]) norm = AggSubtraction() out = norm(x) assert out.shape == (6, 16) assert torch.allclose(out.mean(), torch.tensor(0.)) out = norm(x, index) assert out.shape == (6, 16) assert out[0:2].mean().item() == pytest.approx(0, abs=1e-6) assert out[0:2].mean().item() == pytest.approx(0, abs=1e-6) " 43946,"def drawable_layers(ops, wire_map=None): """"""Determine non-overlapping yet dense placement of operations for drawing. Args: ops Iterable[~.Operator]: a list of operations Keyword Args: wire_map=None (dict): a map from wire label to non-negative integers Returns: list[set[~.Operator]] : Each index is a set of operations for the corresponding layer """""" if wire_map is None: wire_map = default_wire_map(ops) # initialize max_layer = 0 occupied_wires_per_layer = [set()] ops_per_layer = [set()] # loop over operations for op in ops: if len(op.wires) == 0: # if no wires, then it acts of all wires # for example, state and sample mapped_wires = set(wire_map.values()) op_occupied_wires = mapped_wires else: mapped_wires = {wire_map[wire] for wire in op.wires} op_occupied_wires = set(range(min(mapped_wires), max(mapped_wires) + 1)) op_layer = _recursive_find_layer(max_layer, op_occupied_wires, occupied_wires_per_layer) # see if need to add new layer if op_layer > max_layer: max_layer += 1 occupied_wires_per_layer.append(set()) ops_per_layer.append(set()) # Add to op_layer ops_per_layer[op_layer].add(op) occupied_wires_per_layer[op_layer].update(op_occupied_wires) return ops_per_layer ","def drawable_layers(ops, wire_map=None): """"""Determine non-overlapping yet dense placement of operations for drawing. 
Args: ops Iterable[~.Operator]: a list of operations Keyword Args: wire_map=None (dict): a map from wire label to non-negative integers Returns: list[set[~.Operator]] : Each index is a set of operations for the corresponding layer """""" if wire_map is None: wire_map = default_wire_map(ops) # initialize max_layer = 0 occupied_wires_per_layer = [set()] ops_per_layer = [set()] # loop over operations for op in ops: if len(op.wires) == 0: # if no wires, then it acts on all wires # for example, state and sample mapped_wires = set(wire_map.values()) op_occupied_wires = mapped_wires else: mapped_wires = {wire_map[wire] for wire in op.wires} op_occupied_wires = set(range(min(mapped_wires), max(mapped_wires) + 1)) op_layer = _recursive_find_layer(max_layer, op_occupied_wires, occupied_wires_per_layer) # see if need to add new layer if op_layer > max_layer: max_layer += 1 occupied_wires_per_layer.append(set()) ops_per_layer.append(set()) # Add to op_layer ops_per_layer[op_layer].add(op) occupied_wires_per_layer[op_layer].update(op_occupied_wires) return ops_per_layer " 29822,"def sync_secrets( kube_client: KubeClient, cluster: str, service: str, secret_provider_name: str, vault_cluster_config: Mapping[str, str], soa_dir: str, namespace: str, vault_token_file: str, ) -> bool: secret_dir = os.path.join(soa_dir, service, ""secrets"") secret_provider_kwargs = { ""vault_cluster_config"": vault_cluster_config, # TODO: make vault-tools support k8s auth method so we don't have to # mount a token in. ""vault_auth_method"": ""token"", ""vault_token_file"": vault_token_file, } secret_provider = get_secret_provider( secret_provider_name=secret_provider_name, soa_dir=soa_dir, service_name=service, cluster_names=[cluster], secret_provider_kwargs=secret_provider_kwargs, ) if not os.path.isdir(secret_dir): log.debug(f""No secrets dir for {service}"") return True with os.scandir(secret_dir) as secret_file_paths: for secret_file_path in secret_file_paths: if secret_file_path.path.endswith(""json""): secret = secret_file_path.name.replace("".json"", """") with open(secret_file_path, ""r"") as secret_file: secret_data = json.load(secret_file) secret_signature = secret_provider.get_secret_signature_from_data( secret_data ) if secret_signature: kubernetes_secret_signature = get_kubernetes_secret_signature( kube_client=kube_client, secret=secret, service=service, namespace=namespace, ) if not kubernetes_secret_signature: log.info( f""{secret} for {service} not found in {namespace}, creating"" ) try: create_secret( kube_client=kube_client, secret=secret, service=service, secret_provider=secret_provider, namespace=namespace, ) except ApiException as e: if e.status == 409: log.warning( f""Secret {secret} for {service} already exists in {namespace}"" ) else: raise create_kubernetes_secret_signature( kube_client=kube_client, secret=secret, service=service, secret_signature=secret_signature, namespace=namespace, ) elif secret_signature != kubernetes_secret_signature: log.info( f""{secret} for {service} in {namespace} needs updating as signature changed"" ) update_secret( kube_client=kube_client, secret=secret, service=service, secret_provider=secret_provider, namespace=namespace, ) update_kubernetes_secret_signature( kube_client=kube_client, secret=secret, service=service, secret_signature=secret_signature, namespace=namespace, ) else: log.info(f""{secret} for {service} in {namespace} up to date"") return True ","def sync_secrets( kube_client: KubeClient, cluster: str, service: str, secret_provider_name: str, 
vault_cluster_config: Mapping[str, str], soa_dir: str, namespace: str, vault_token_file: str = DEFAULT_VAULT_TOKEN_FILE, ) -> bool: secret_dir = os.path.join(soa_dir, service, ""secrets"") secret_provider_kwargs = { ""vault_cluster_config"": vault_cluster_config, # TODO: make vault-tools support k8s auth method so we don't have to # mount a token in. ""vault_auth_method"": ""token"", ""vault_token_file"": vault_token_file, } secret_provider = get_secret_provider( secret_provider_name=secret_provider_name, soa_dir=soa_dir, service_name=service, cluster_names=[cluster], secret_provider_kwargs=secret_provider_kwargs, ) if not os.path.isdir(secret_dir): log.debug(f""No secrets dir for {service}"") return True with os.scandir(secret_dir) as secret_file_paths: for secret_file_path in secret_file_paths: if secret_file_path.path.endswith(""json""): secret = secret_file_path.name.replace("".json"", """") with open(secret_file_path, ""r"") as secret_file: secret_data = json.load(secret_file) secret_signature = secret_provider.get_secret_signature_from_data( secret_data ) if secret_signature: kubernetes_secret_signature = get_kubernetes_secret_signature( kube_client=kube_client, secret=secret, service=service, namespace=namespace, ) if not kubernetes_secret_signature: log.info( f""{secret} for {service} not found in {namespace}, creating"" ) try: create_secret( kube_client=kube_client, secret=secret, service=service, secret_provider=secret_provider, namespace=namespace, ) except ApiException as e: if e.status == 409: log.warning( f""Secret {secret} for {service} already exists in {namespace}"" ) else: raise create_kubernetes_secret_signature( kube_client=kube_client, secret=secret, service=service, secret_signature=secret_signature, namespace=namespace, ) elif secret_signature != kubernetes_secret_signature: log.info( f""{secret} for {service} in {namespace} needs updating as signature changed"" ) update_secret( kube_client=kube_client, secret=secret, service=service, secret_provider=secret_provider, namespace=namespace, ) update_kubernetes_secret_signature( kube_client=kube_client, secret=secret, service=service, secret_signature=secret_signature, namespace=namespace, ) else: log.info(f""{secret} for {service} in {namespace} up to date"") return True " 37186,"def _log_assembly_time(start_time, end_time): log_msg = ""Total Assembly Time - %.5f (ms)"" % ((end_time - start_time) * 1000) LOG.info(log_msg) ","def _log_assembly_time(start_time, end_time): log_msg = ""Total Assembly Time - %.5f (ms)"" % ((end_time - start_time) / 1000) LOG.info(log_msg) " 24483,"def assert_all_metrics(aggregator, minimun_tags=None, hostname=None): for metric, metric_type in METRICS: aggregator.assert_metric(metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname) minimun_tags = minimun_tags or [] for tag in minimun_tags: aggregator.assert_metric_has_tag(metric, tag) for metric, metric_type in OPTIONAL_METRICS: aggregator.assert_metric( metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname, at_least=0 ) aggregator.assert_all_metrics_covered() ","def assert_all_metrics(aggregator, minimum_tags=None, hostname=None): for metric, metric_type in METRICS: aggregator.assert_metric(metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname) minimun_tags = minimun_tags or [] for tag in minimun_tags: aggregator.assert_metric_has_tag(metric, tag) for metric, metric_type in OPTIONAL_METRICS: aggregator.assert_metric( metric, metric_type=getattr(aggregator, 
metric_type.upper()), hostname=hostname, at_least=0 ) aggregator.assert_all_metrics_covered() " 7874,"def test_get_elements(): # test that zero elements exist on creation m = openmc.Material() assert len(m.get_elements()) == 0 # test addition of a single element m.add_element('Li', 0.2) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test that adding the same element m.add_element('Li', 0.3) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test adding another element m.add_element('Si', 0.3) assert len(m.get_elements()) == 2 assert 'Si' in m.get_elements() # test adding a third element m.add_element('O', 0.4) assert len(m.get_elements()) == 3 # test removal of nuclides m.remove_nuclide('O16') m.remove_nuclide('O17') assert 'O' not in m.get_elements() assert 'Si' in m.get_elements() assert 'Li' in m.get_elements() assert len(m.get_elements()) == 2 ","def test_get_elements(): # test that zero elements exist on creation m = openmc.Material() assert len(m.get_elements()) == 0 # test addition of a single element m.add_element('Li', 0.2) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test that adding the same element m.add_element('Li', 0.3) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test adding another element m.add_element('Si', 0.3) assert len(m.get_elements()) == 2 assert m.get_elements() == [""Li"", ""Si""] # test adding a third element m.add_element('O', 0.4) assert len(m.get_elements()) == 3 # test removal of nuclides m.remove_nuclide('O16') m.remove_nuclide('O17') assert 'O' not in m.get_elements() assert 'Si' in m.get_elements() assert 'Li' in m.get_elements() assert len(m.get_elements()) == 2 " 13406,"def test_05_verify_logs_collection_still_work_after_enrcripted_dataset_is_moved_to_system_dataset(logs_data): cmd = ""cat /var/log/middlewared.log"" middlewared_log = SSH_TEST(cmd, user, password, ip) assert middlewared_log['result'] is True, str(middlewared_log) logs_data['middleware_log_2'] = middlewared_log['output'].splitlines()[-1] assert logs_data['middleware_log_1'] in middlewared_log['output'], str(middlewared_log['output']) assert logs_data['middleware_log_1'] != logs_data['middleware_log_2'] cmd = ""journalctl --no-page"" journald_log = SSH_TEST(cmd, user, password, ip) assert journald_log['result'] is True, str(journald_log) logs_data['journald_log_2'] = journald_log['output'].splitlines()[-1] assert logs_data['journald_log_1'] in journald_log['output'], str(journald_log['output']) assert logs_data['journald_log_1'] != logs_data['journald_log_2'] cmd = ""cat /var/log/syslog"" syslog = SSH_TEST(cmd, user, password, ip) assert syslog['result'] is True, str(syslog) logs_data['syslog_2'] = syslog['output'].splitlines()[-1] assert logs_data['syslog_1'] in syslog['output'], str(syslog['output']) assert logs_data['syslog_1'] != logs_data['syslog_2'] ","def test_05_verify_logs_after_sysds_is_moved_to_a_passphrase_encrypted_pool(logs_data): cmd = ""cat /var/log/middlewared.log"" middlewared_log = SSH_TEST(cmd, user, password, ip) assert middlewared_log['result'] is True, str(middlewared_log) logs_data['middleware_log_2'] = middlewared_log['output'].splitlines()[-1] assert logs_data['middleware_log_1'] in middlewared_log['output'], str(middlewared_log['output']) assert logs_data['middleware_log_1'] != logs_data['middleware_log_2'] cmd = ""journalctl --no-page"" journald_log = SSH_TEST(cmd, user, password, ip) assert journald_log['result'] is True, str(journald_log) logs_data['journald_log_2'] = 
journald_log['output'].splitlines()[-1] assert logs_data['journald_log_1'] in journald_log['output'], str(journald_log['output']) assert logs_data['journald_log_1'] != logs_data['journald_log_2'] cmd = ""cat /var/log/syslog"" syslog = SSH_TEST(cmd, user, password, ip) assert syslog['result'] is True, str(syslog) logs_data['syslog_2'] = syslog['output'].splitlines()[-1] assert logs_data['syslog_1'] in syslog['output'], str(syslog['output']) assert logs_data['syslog_1'] != logs_data['syslog_2'] " 50540,"def plot_dataframe( df, column=None, cmap=None, color=None, ax=None, cax=None, categorical=False, legend=False, scheme=None, k=5, vmin=None, vmax=None, markersize=None, figsize=None, legend_kwds=None, categories=None, classification_kwds=None, missing_kwds=None, aspect=""auto"", **style_kwds ): """""" Plot a GeoDataFrame. Generate a plot of a GeoDataFrame with matplotlib. If a column is specified, the plot coloring will be based on values in that column. Parameters ---------- df : GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column : str, np.array, pd.Series (default None) The name of the dataframe column, np.array, or pd.Series to be plotted. If np.array or pd.Series are used then it must have same length as dataframe. Values are used to color the plot. Ignored if `color` is also set. cmap : str (default None) The name of a colormap recognized by matplotlib. color : str (default None) If specified, all objects will be colored uniformly. ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot cax : matplotlib.pyplot Artist (default None) axes on which to draw the legend in case of color map. categorical : bool (default False) If False, cmap will reflect numerical values of the column being plotted. For non-numerical columns, this will be set to True. legend : bool (default False) Plot a legend. Ignored if no `column` is given, or if `color` is given. scheme : str (default None) Name of a choropleth classification scheme (requires mapclassify). A mapclassify.MapClassifier object will be used under the hood. Supported are all schemes provided by mapclassify (e.g. 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled', 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced', 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks', 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean', 'UserDefined'). Arguments can be passed in classification_kwds. k : int (default 5) Number of classes (ignored if scheme is None) vmin : None or float (default None) Minimum value of cmap. If None, the minimum data value in the column to be plotted is used. vmax : None or float (default None) Maximum value of cmap. If None, the maximum data value in the column to be plotted is used. markersize : str or float or sequence (default None) Only applies to point geometries within a frame. If a str, will use the values in the column of the frame specified by markersize to set the size of markers. Otherwise can be a value to apply to all points, or a sequence of the same length as the number of points. figsize : tuple of integers (default None) Size of the resulting matplotlib.figure.Figure. If the argument axes is given explicitly, figsize is ignored. legend_kwds : dict (default None) Keyword arguments to pass to matplotlib.pyplot.legend() or matplotlib.pyplot.colorbar(). 
Additional accepted keywords when `scheme` is specified: fmt : string A formatting specification for the bin edges of the classes in the legend. For example, to have no decimals: ``{""fmt"": ""{:.0f}""}``. labels : list-like A list of legend labels to override the auto-generated labels. Needs to have the same number of elements as the number of classes (`k`). interval : boolean An option to remove brackets from mapclassify legend. If True, open/closed interval brackets are shown in legend. categories : list-like Ordered list-like object of categories to be used for categorical plot. classification_kwds : dict (default None) Keyword arguments to pass to mapclassify missing_kwds : dict (default None) Keyword arguments specifying color options (as style_kwds) to be passed on to geometries with missing values in addition to or overwriting other style kwds. If None, geometries with missing values are not plotted. aspect : 'auto', 'equal', None or float (default 'auto') Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if however data are not projected (coordinates are long/lat), the aspect is by default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat square appears square in the middle of the plot. This implies an Equirectangular projection. If None, the aspect of `ax` won't be changed. It can also be set manually (float) as the ratio of y-unit to x-unit. **style_kwds : dict Style options to be passed on to the actual plot function, such as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``, ``alpha``. Returns ------- ax : matplotlib axes instance """""" if ""colormap"" in style_kwds: warnings.warn( ""'colormap' is deprecated, please use 'cmap' instead "" ""(for consistency with matplotlib)"", FutureWarning, ) cmap = style_kwds.pop(""colormap"") if ""axes"" in style_kwds: warnings.warn( ""'axes' is deprecated, please use 'ax' instead "" ""(for consistency with pandas)"", FutureWarning, ) ax = style_kwds.pop(""axes"") if column is not None and color is not None: warnings.warn( ""Only specify one of 'column' or 'color'. Using 'color'."", UserWarning ) column = None try: import matplotlib.pyplot as plt except ImportError: raise ImportError( ""The matplotlib package is required for plotting in geopandas. "" ""You can install it using 'conda install -c conda-forge matplotlib' or "" ""'pip install matplotlib'."" ) if ax is None: if cax is not None: raise ValueError(""'ax' can not be None if 'cax' is not."") fig, ax = plt.subplots(figsize=figsize) if aspect == ""auto"": if df.crs and df.crs.is_geographic: bounds = df.total_bounds y_coord = np.mean([bounds[1], bounds[3]]) ax.set_aspect(1 / np.cos(y_coord * np.pi / 180)) # formula ported from R package sp # https://github.com/edzer/sp/blob/master/R/mapasp.R else: ax.set_aspect(""equal"") elif aspect is not None: ax.set_aspect(aspect) # GH 1555 # if legend_kwds set, copy so we don't update it in place if legend_kwds is not None: legend_kwds = legend_kwds.copy() if df.empty: warnings.warn( ""The GeoDataFrame you are attempting to plot is "" ""empty. 
Nothing has been displayed."", UserWarning, ) return ax if isinstance(markersize, str): markersize = df[markersize].values if column is None: return plot_series( df.geometry, cmap=cmap, color=color, ax=ax, figsize=figsize, markersize=markersize, aspect=aspect, **style_kwds ) # To accept pd.Series and np.arrays as column if isinstance(column, (np.ndarray, pd.Series)): if column.shape[0] != df.shape[0]: raise ValueError( ""The dataframe and given column have different number of rows."" ) else: values = column else: values = df[column] if pd.api.types.is_categorical_dtype(values.dtype): if categories is not None: raise ValueError( ""Cannot specify 'categories' when column has categorical dtype"" ) categorical = True elif values.dtype is np.dtype(""O"") or categories: categorical = True nan_idx = np.asarray(pd.isna(values), dtype=""bool"") # Define `values` as a Series if categorical: if cmap is None: cmap = ""tab10"" cat = pd.Categorical(values, categories=categories) categories = list(cat.categories) # values missing in the Categorical but not in original values missing = list(np.unique(values[~nan_idx & cat.isna()])) if missing: raise ValueError( ""Column contains values not listed in categories. "" ""Missing categories: {}."".format(missing) ) values = cat.codes[~nan_idx] vmin = 0 if vmin is None else vmin vmax = len(categories) - 1 if vmax is None else vmax if scheme is not None: if classification_kwds is None: classification_kwds = {} if ""k"" not in classification_kwds: classification_kwds[""k""] = k binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds) # set categorical to True for creating the legend categorical = True if legend_kwds is not None and ""labels"" in legend_kwds: if len(legend_kwds[""labels""]) != binning.k: raise ValueError( ""Number of labels must match number of bins, "" ""received {} labels for {} bins"".format( len(legend_kwds[""labels""]), binning.k ) ) else: categories = list(legend_kwds.pop(""labels"")) else: fmt = ""{:.2f}"" if legend_kwds is not None and ""fmt"" in legend_kwds: fmt = legend_kwds.pop(""fmt"") categories = binning.get_legend_classes(fmt) show_interval = True if legend_kwds is not None and ""interval"" in legend_kwds: show_interval = legend_kwds.pop(""interval"") if not show_interval: categories = [ c.replace(""("", """").replace("")"", """").replace(""["", """").replace(""]"", """") for c in categories ] values = np.array(binning.yb) # fill values with placeholder where were NaNs originally to map them properly # (after removing them in categorical or scheme) if categorical: for n in np.where(nan_idx)[0]: values = np.insert(values, n, values[0]) mn = values[~np.isnan(values)].min() if vmin is None else vmin mx = values[~np.isnan(values)].max() if vmax is None else vmax # decompose GeometryCollections geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix=""Geom"") values = np.take(values, multiindex, axis=0) nan_idx = np.take(nan_idx, multiindex, axis=0) expl_series = geopandas.GeoSeries(geoms) geom_types = expl_series.type poly_idx = np.asarray((geom_types == ""Polygon"") | (geom_types == ""MultiPolygon"")) line_idx = np.asarray( (geom_types == ""LineString"") | (geom_types == ""MultiLineString"") | (geom_types == ""LinearRing"") ) point_idx = np.asarray((geom_types == ""Point"") | (geom_types == ""MultiPoint"")) # plot all Polygons and all MultiPolygon components in the same collection polys = expl_series[poly_idx & np.invert(nan_idx)] subset = values[poly_idx & np.invert(nan_idx)] if not polys.empty: 
_plot_polygon_collection( ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all LineStrings and MultiLineString components in same collection lines = expl_series[line_idx & np.invert(nan_idx)] subset = values[line_idx & np.invert(nan_idx)] if not lines.empty: _plot_linestring_collection( ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all Points in the same collection points = expl_series[point_idx & np.invert(nan_idx)] subset = values[point_idx & np.invert(nan_idx)] if not points.empty: if isinstance(markersize, np.ndarray): markersize = np.take(markersize, multiindex, axis=0) markersize = markersize[point_idx & np.invert(nan_idx)] _plot_point_collection( ax, points, subset, vmin=mn, vmax=mx, markersize=markersize, cmap=cmap, **style_kwds ) if missing_kwds is not None: if color: if ""color"" not in missing_kwds: missing_kwds[""color""] = color merged_kwds = style_kwds.copy() merged_kwds.update(missing_kwds) plot_series(expl_series[nan_idx], ax=ax, **merged_kwds) if legend and not color: if legend_kwds is None: legend_kwds = {} if ""fmt"" in legend_kwds: legend_kwds.pop(""fmt"") from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib import cm norm = style_kwds.get(""norm"", None) if not norm: norm = Normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) if categorical: patches = [] for value, cat in enumerate(categories): patches.append( Line2D( [0], [0], linestyle=""none"", marker=""o"", alpha=style_kwds.get(""alpha"", 1), markersize=10, markerfacecolor=n_cmap.to_rgba(value), markeredgewidth=0, ) ) if missing_kwds is not None: if ""color"" in merged_kwds: merged_kwds[""facecolor""] = merged_kwds[""color""] patches.append( Line2D( [0], [0], linestyle=""none"", marker=""o"", alpha=merged_kwds.get(""alpha"", 1), markersize=10, markerfacecolor=merged_kwds.get(""facecolor"", None), markeredgecolor=merged_kwds.get(""edgecolor"", None), markeredgewidth=merged_kwds.get( ""linewidth"", 1 if merged_kwds.get(""edgecolor"", False) else 0 ), ) ) categories.append(merged_kwds.get(""label"", ""NaN"")) legend_kwds.setdefault(""numpoints"", 1) legend_kwds.setdefault(""loc"", ""best"") ax.legend(patches, categories, **legend_kwds) else: if cax is not None: legend_kwds.setdefault(""cax"", cax) else: legend_kwds.setdefault(""ax"", ax) n_cmap.set_array([]) ax.get_figure().colorbar(n_cmap, **legend_kwds) plt.draw() return ax ","def plot_dataframe( df, column=None, cmap=None, color=None, ax=None, cax=None, categorical=False, legend=False, scheme=None, k=5, vmin=None, vmax=None, markersize=None, figsize=None, legend_kwds=None, categories=None, classification_kwds=None, missing_kwds=None, aspect=""auto"", **style_kwds ): """""" Plot a GeoDataFrame. Generate a plot of a GeoDataFrame with matplotlib. If a column is specified, the plot coloring will be based on values in that column. Parameters ---------- df : GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column : str, np.array, pd.Series (default None) The name of the dataframe column, np.array, or pd.Series to be plotted. If np.array or pd.Series are used then it must have same length as dataframe. Values are used to color the plot. Ignored if `color` is also set. cmap : str (default None) The name of a colormap recognized by matplotlib. color : str (default None) If specified, all objects will be colored uniformly. 
ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot cax : matplotlib.pyplot Artist (default None) axes on which to draw the legend in case of color map. categorical : bool (default False) If False, cmap will reflect numerical values of the column being plotted. For non-numerical columns, this will be set to True. legend : bool (default False) Plot a legend. Ignored if no `column` is given, or if `color` is given. scheme : str (default None) Name of a choropleth classification scheme (requires mapclassify). A mapclassify.MapClassifier object will be used under the hood. Supported are all schemes provided by mapclassify (e.g. 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled', 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced', 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks', 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean', 'UserDefined'). Arguments can be passed in classification_kwds. k : int (default 5) Number of classes (ignored if scheme is None) vmin : None or float (default None) Minimum value of cmap. If None, the minimum data value in the column to be plotted is used. vmax : None or float (default None) Maximum value of cmap. If None, the maximum data value in the column to be plotted is used. markersize : str or float or sequence (default None) Only applies to point geometries within a frame. If a str, will use the values in the column of the frame specified by markersize to set the size of markers. Otherwise can be a value to apply to all points, or a sequence of the same length as the number of points. figsize : tuple of integers (default None) Size of the resulting matplotlib.figure.Figure. If the argument axes is given explicitly, figsize is ignored. legend_kwds : dict (default None) Keyword arguments to pass to matplotlib.pyplot.legend() or matplotlib.pyplot.colorbar(). Additional accepted keywords when `scheme` is specified: fmt : string A formatting specification for the bin edges of the classes in the legend. For example, to have no decimals: ``{""fmt"": ""{:.0f}""}``. labels : list-like A list of legend labels to override the auto-generated labels. Needs to have the same number of elements as the number of classes (`k`). interval : boolean (default True) An option to control brackets from mapclassify legend. If True, open/closed interval brackets are shown in the legend. categories : list-like Ordered list-like object of categories to be used for categorical plot. classification_kwds : dict (default None) Keyword arguments to pass to mapclassify missing_kwds : dict (default None) Keyword arguments specifying color options (as style_kwds) to be passed on to geometries with missing values in addition to or overwriting other style kwds. If None, geometries with missing values are not plotted. aspect : 'auto', 'equal', None or float (default 'auto') Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if however data are not projected (coordinates are long/lat), the aspect is by default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat square appears square in the middle of the plot. This implies an Equirectangular projection. If None, the aspect of `ax` won't be changed. It can also be set manually (float) as the ratio of y-unit to x-unit. **style_kwds : dict Style options to be passed on to the actual plot function, such as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``, ``alpha``. 
Returns ------- ax : matplotlib axes instance """""" if ""colormap"" in style_kwds: warnings.warn( ""'colormap' is deprecated, please use 'cmap' instead "" ""(for consistency with matplotlib)"", FutureWarning, ) cmap = style_kwds.pop(""colormap"") if ""axes"" in style_kwds: warnings.warn( ""'axes' is deprecated, please use 'ax' instead "" ""(for consistency with pandas)"", FutureWarning, ) ax = style_kwds.pop(""axes"") if column is not None and color is not None: warnings.warn( ""Only specify one of 'column' or 'color'. Using 'color'."", UserWarning ) column = None try: import matplotlib.pyplot as plt except ImportError: raise ImportError( ""The matplotlib package is required for plotting in geopandas. "" ""You can install it using 'conda install -c conda-forge matplotlib' or "" ""'pip install matplotlib'."" ) if ax is None: if cax is not None: raise ValueError(""'ax' can not be None if 'cax' is not."") fig, ax = plt.subplots(figsize=figsize) if aspect == ""auto"": if df.crs and df.crs.is_geographic: bounds = df.total_bounds y_coord = np.mean([bounds[1], bounds[3]]) ax.set_aspect(1 / np.cos(y_coord * np.pi / 180)) # formula ported from R package sp # https://github.com/edzer/sp/blob/master/R/mapasp.R else: ax.set_aspect(""equal"") elif aspect is not None: ax.set_aspect(aspect) # GH 1555 # if legend_kwds set, copy so we don't update it in place if legend_kwds is not None: legend_kwds = legend_kwds.copy() if df.empty: warnings.warn( ""The GeoDataFrame you are attempting to plot is "" ""empty. Nothing has been displayed."", UserWarning, ) return ax if isinstance(markersize, str): markersize = df[markersize].values if column is None: return plot_series( df.geometry, cmap=cmap, color=color, ax=ax, figsize=figsize, markersize=markersize, aspect=aspect, **style_kwds ) # To accept pd.Series and np.arrays as column if isinstance(column, (np.ndarray, pd.Series)): if column.shape[0] != df.shape[0]: raise ValueError( ""The dataframe and given column have different number of rows."" ) else: values = column else: values = df[column] if pd.api.types.is_categorical_dtype(values.dtype): if categories is not None: raise ValueError( ""Cannot specify 'categories' when column has categorical dtype"" ) categorical = True elif values.dtype is np.dtype(""O"") or categories: categorical = True nan_idx = np.asarray(pd.isna(values), dtype=""bool"") # Define `values` as a Series if categorical: if cmap is None: cmap = ""tab10"" cat = pd.Categorical(values, categories=categories) categories = list(cat.categories) # values missing in the Categorical but not in original values missing = list(np.unique(values[~nan_idx & cat.isna()])) if missing: raise ValueError( ""Column contains values not listed in categories. 
"" ""Missing categories: {}."".format(missing) ) values = cat.codes[~nan_idx] vmin = 0 if vmin is None else vmin vmax = len(categories) - 1 if vmax is None else vmax if scheme is not None: if classification_kwds is None: classification_kwds = {} if ""k"" not in classification_kwds: classification_kwds[""k""] = k binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds) # set categorical to True for creating the legend categorical = True if legend_kwds is not None and ""labels"" in legend_kwds: if len(legend_kwds[""labels""]) != binning.k: raise ValueError( ""Number of labels must match number of bins, "" ""received {} labels for {} bins"".format( len(legend_kwds[""labels""]), binning.k ) ) else: categories = list(legend_kwds.pop(""labels"")) else: fmt = ""{:.2f}"" if legend_kwds is not None and ""fmt"" in legend_kwds: fmt = legend_kwds.pop(""fmt"") categories = binning.get_legend_classes(fmt) show_interval = True if legend_kwds is not None and ""interval"" in legend_kwds: show_interval = legend_kwds.pop(""interval"") if not show_interval: categories = [ c.replace(""("", """").replace("")"", """").replace(""["", """").replace(""]"", """") for c in categories ] values = np.array(binning.yb) # fill values with placeholder where were NaNs originally to map them properly # (after removing them in categorical or scheme) if categorical: for n in np.where(nan_idx)[0]: values = np.insert(values, n, values[0]) mn = values[~np.isnan(values)].min() if vmin is None else vmin mx = values[~np.isnan(values)].max() if vmax is None else vmax # decompose GeometryCollections geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix=""Geom"") values = np.take(values, multiindex, axis=0) nan_idx = np.take(nan_idx, multiindex, axis=0) expl_series = geopandas.GeoSeries(geoms) geom_types = expl_series.type poly_idx = np.asarray((geom_types == ""Polygon"") | (geom_types == ""MultiPolygon"")) line_idx = np.asarray( (geom_types == ""LineString"") | (geom_types == ""MultiLineString"") | (geom_types == ""LinearRing"") ) point_idx = np.asarray((geom_types == ""Point"") | (geom_types == ""MultiPoint"")) # plot all Polygons and all MultiPolygon components in the same collection polys = expl_series[poly_idx & np.invert(nan_idx)] subset = values[poly_idx & np.invert(nan_idx)] if not polys.empty: _plot_polygon_collection( ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all LineStrings and MultiLineString components in same collection lines = expl_series[line_idx & np.invert(nan_idx)] subset = values[line_idx & np.invert(nan_idx)] if not lines.empty: _plot_linestring_collection( ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all Points in the same collection points = expl_series[point_idx & np.invert(nan_idx)] subset = values[point_idx & np.invert(nan_idx)] if not points.empty: if isinstance(markersize, np.ndarray): markersize = np.take(markersize, multiindex, axis=0) markersize = markersize[point_idx & np.invert(nan_idx)] _plot_point_collection( ax, points, subset, vmin=mn, vmax=mx, markersize=markersize, cmap=cmap, **style_kwds ) if missing_kwds is not None: if color: if ""color"" not in missing_kwds: missing_kwds[""color""] = color merged_kwds = style_kwds.copy() merged_kwds.update(missing_kwds) plot_series(expl_series[nan_idx], ax=ax, **merged_kwds) if legend and not color: if legend_kwds is None: legend_kwds = {} if ""fmt"" in legend_kwds: legend_kwds.pop(""fmt"") from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib 
import cm norm = style_kwds.get(""norm"", None) if not norm: norm = Normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) if categorical: patches = [] for value, cat in enumerate(categories): patches.append( Line2D( [0], [0], linestyle=""none"", marker=""o"", alpha=style_kwds.get(""alpha"", 1), markersize=10, markerfacecolor=n_cmap.to_rgba(value), markeredgewidth=0, ) ) if missing_kwds is not None: if ""color"" in merged_kwds: merged_kwds[""facecolor""] = merged_kwds[""color""] patches.append( Line2D( [0], [0], linestyle=""none"", marker=""o"", alpha=merged_kwds.get(""alpha"", 1), markersize=10, markerfacecolor=merged_kwds.get(""facecolor"", None), markeredgecolor=merged_kwds.get(""edgecolor"", None), markeredgewidth=merged_kwds.get( ""linewidth"", 1 if merged_kwds.get(""edgecolor"", False) else 0 ), ) ) categories.append(merged_kwds.get(""label"", ""NaN"")) legend_kwds.setdefault(""numpoints"", 1) legend_kwds.setdefault(""loc"", ""best"") ax.legend(patches, categories, **legend_kwds) else: if cax is not None: legend_kwds.setdefault(""cax"", cax) else: legend_kwds.setdefault(""ax"", ax) n_cmap.set_array([]) ax.get_figure().colorbar(n_cmap, **legend_kwds) plt.draw() return ax " 34374,"def run_interactive_learning( file_importer: TrainingDataImporter, skip_visualization: bool = False, conversation_id: Text = uuid.uuid4().hex, server_args: Dict[Text, Any] = None, ): """"""Start the interactive learning with the model of the agent."""""" global SAVE_IN_E2E server_args = server_args or {} if server_args.get(""nlu_data""): PATHS[""nlu""] = server_args[""nlu_data""] if server_args.get(""stories""): PATHS[""stories""] = server_args[""stories""] if server_args.get(""domain""): PATHS[""domain""] = server_args[""domain""] SAVE_IN_E2E = server_args[""e2e""] if not skip_visualization: p = Process(target=start_visualization, args=(DEFAULT_STORY_GRAPH_FILE,)) p.daemon = True p.start() else: p = None app = run.configure_app(enable_api=True) endpoints = AvailableEndpoints.read_endpoints(server_args.get(""endpoints"")) # before_server_start handlers make sure the agent is loaded before the # interactive learning IO starts app.register_listener( partial(run.load_agent_on_start, server_args.get(""model""), endpoints, None), ""before_server_start"", ) _serve_application(app, file_importer, skip_visualization, conversation_id) if not skip_visualization and p is not None: p.terminate() # pytype: disable=attribute-error p.join() # pytype: disable=attribute-error ","def run_interactive_learning( file_importer: TrainingDataImporter, skip_visualization: bool = False, conversation_id: Text = uuid.uuid4().hex, server_args: Dict[Text, Any] = None, ) -> None: """"""Start the interactive learning with the model of the agent."""""" global SAVE_IN_E2E server_args = server_args or {} if server_args.get(""nlu_data""): PATHS[""nlu""] = server_args[""nlu_data""] if server_args.get(""stories""): PATHS[""stories""] = server_args[""stories""] if server_args.get(""domain""): PATHS[""domain""] = server_args[""domain""] SAVE_IN_E2E = server_args[""e2e""] if not skip_visualization: p = Process(target=start_visualization, args=(DEFAULT_STORY_GRAPH_FILE,)) p.daemon = True p.start() else: p = None app = run.configure_app(enable_api=True) endpoints = AvailableEndpoints.read_endpoints(server_args.get(""endpoints"")) # before_server_start handlers make sure the agent is loaded before the # interactive learning IO starts app.register_listener( partial(run.load_agent_on_start, server_args.get(""model""), endpoints, 
None), ""before_server_start"", ) _serve_application(app, file_importer, skip_visualization, conversation_id) if not skip_visualization and p is not None: p.terminate() # pytype: disable=attribute-error p.join() # pytype: disable=attribute-error " 7462,"def test_deprecated_attribute(): class DummyClass: def __init__(self): self._foo = 42 self._bar = 4242 def set_private(self): self._foo = 100 self._bar = 1000 foo = deprecated_attribute('foo', '0.2') bar = deprecated_attribute('bar', '0.2', warning_type=NewDeprecationWarning) dummy = DummyClass() with catch_warnings(AstropyDeprecationWarning) as wfoo: xfoo = dummy.foo with catch_warnings(AstropyDeprecationWarning) as wbar: xbar = dummy.bar assert len(wfoo) == 1 assert str(wfoo[0].message) == (""The foo attribute is deprecated and may "" ""be removed in a future version."") assert wfoo[0].category == AstropyDeprecationWarning assert len(wbar) == 1 assert str(wbar[0].message) == (""The bar attribute is deprecated and may "" ""be removed in a future version."") assert wbar[0].category == NewDeprecationWarning with catch_warnings() as w: dummy.set_private() assert len(w) == 0 ","def test_deprecated_attribute(): class DummyClass: def __init__(self): self._foo = 42 self._bar = 4242 def set_private(self): self._foo = 100 self._bar = 1000 foo = deprecated_attribute('foo', '0.2') bar = deprecated_attribute('bar', '0.2', warning_type=NewDeprecationWarning) dummy = DummyClass() with catch_warnings(AstropyDeprecationWarning) as wfoo: xfoo = dummy.foo with catch_warnings(AstropyDeprecationWarning) as wbar: dummy.bar assert len(wfoo) == 1 assert str(wfoo[0].message) == (""The foo attribute is deprecated and may "" ""be removed in a future version."") assert wfoo[0].category == AstropyDeprecationWarning assert len(wbar) == 1 assert str(wbar[0].message) == (""The bar attribute is deprecated and may "" ""be removed in a future version."") assert wbar[0].category == NewDeprecationWarning with catch_warnings() as w: dummy.set_private() assert len(w) == 0 " 37829,"def test_shell_eval(): if sys.platform == ""win32"": env_string = 'VAR=""$(cmd /C ""echo a test string"")""' else: env_string = 'VAR=""$(echo ""a test"" string)""' environment_recipe = parse_environment(env_string) env_copy = os.environ.copy() env_copy.pop(""VAR"", None) environment_dict = environment_recipe.as_dictionary(prev_environment=env_copy) assert environment_dict[""VAR""] == ""a test string"" ","def test_shell_eval(): if sys.platform == ""win32"": env_string = 'VAR=""$(cmd /C echo ""a test"" string)""' else: env_string = 'VAR=""$(echo ""a test"" string)""' environment_recipe = parse_environment(env_string) env_copy = os.environ.copy() env_copy.pop(""VAR"", None) environment_dict = environment_recipe.as_dictionary(prev_environment=env_copy) assert environment_dict[""VAR""] == ""a test string"" " 35440,"def get_arg_parser(): parser = argparse.ArgumentParser(description=""PlotJuggler plugin for reading openpilot logs"", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(""--qlog"", action=""store_true"", help=""Use qlogs"") parser.add_argument(""--can"", action=""store_true"", help=""Parse CAN data"") parser.add_argument(""--stream"", action=""store_true"", help=""Start PlotJuggler without a route to stream data using Cereal"") parser.add_argument(""--layout"", nargs='?', help=""Run PlotJuggler with a pre-defined layout"") parser.add_argument(""route_name"", nargs='?', help=""The route name to plot, cabana share url accepted"") parser.add_argument(""segment_number"", 
type=int, nargs='?', help=""The index of the segment to plot"") parser.add_argument(""segment_count"", type=int, nargs='?', help=""The number of segments to plot"", default=1) return parser ","def get_arg_parser(): parser = argparse.ArgumentParser(description=""PlotJuggler plugin for reading openpilot logs"", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(""--qlog"", action=""store_true"", help=""Use qlogs"") parser.add_argument(""--can"", action=""store_true"", help=""Parse CAN data"") parser.add_argument(""--stream"", action=""store_true"", help=""Start PlotJuggler without a route to stream data using Cereal"") parser.add_argument(""--layout"", nargs='?', help=""Run PlotJuggler with a pre-defined layout"") parser.add_argument(""route_name"", nargs='?', help=""The route name to plot (cabana share URL accepted)"") parser.add_argument(""segment_number"", type=int, nargs='?', help=""The index of the segment to plot"") parser.add_argument(""segment_count"", type=int, nargs='?', help=""The number of segments to plot"", default=1) return parser " 43971,"def generate_fermionic_hamiltonian(mol, cutoff=1.0e-12): r""""""Return a function that computes the fermionic hamiltonian. Args: mol (Molecule): the molecule object cutoff (float): cutoff value for discarding the negligible electronic integrals Returns: function: function that computes the the fermionic hamiltonian **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False) >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554], >>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True) >>> mol = Molecule(symbols, geometry, alpha=alpha) >>> args = [alpha] >>> h = generate_fermionic_hamiltonian(mol)(*args) >>> h[0] array([ 1. , -1.39021927, 0.35721954, 0.08512072, 0.35721954, 0.35721954, 0.08512072, 0.08512072, 0.08512072, 0.35092658, 0.08512072, 0.08512072, 0.35092658, 0.35092658, -1.39021927, 0.35721954, 0.08512072, 0.08512072, 0.35092658, 0.35092658, 0.08512072, 0.35092658, 0.35092658, 0.08512072, 0.08512072, -0.29165331, 0.08512072, 0.36941834, 0.08512072, 0.08512072, 0.36941834, 0.36941834, 0.35092658, 0.08512072, -0.29165331, 0.08512072, 0.36941834]) """""" def fermionic_hamiltonian(*args): r""""""Compute the fermionic hamiltonian. 
Args: args (array[array[float]]): initial values of the differentiable parameters Returns: tuple(array[float], list[list[int]]): the Hamiltonian coefficients and operators """""" e_core, one, two = generate_electron_integrals(mol)(*args) e_core = anp.array([e_core]) indices_one = anp.argwhere(abs(one) >= cutoff) operators_one = (indices_one * 2).tolist() + ( indices_one * 2 + 1 ).tolist() # up-up + down-down terms coeffs_one = anp.tile(one[abs(one) >= cutoff], 2) indices_two = anp.argwhere(abs(two) >= cutoff) n = len(indices_two) operators_two = ( [(indices_two[i] * 2).tolist() for i in range(n)] # up-up-up-up term + [ (indices_two[i] * 2 + [0, 1, 1, 0]).tolist() for i in range(n) ] # up-down-down-up term + [ (indices_two[i] * 2 + [1, 0, 0, 1]).tolist() for i in range(n) ] # down-up-up-down term + [(indices_two[i] * 2 + 1).tolist() for i in range(n)] # down-down-down-down term ) coeffs_two = anp.tile(two[abs(two) >= cutoff], 4) / 2 coeffs = anp.concatenate((e_core, coeffs_one, coeffs_two)) operators = [[]] + operators_one + operators_two indices_sort = [operators.index(i) for i in sorted(operators)] return coeffs[indices_sort], sorted(operators) return fermionic_hamiltonian ","def generate_fermionic_hamiltonian(mol, cutoff=1.0e-12): r""""""Return a function that computes the fermionic hamiltonian. Args: mol (Molecule): the molecule object cutoff (float): cutoff value for discarding the negligible electronic integrals Returns: function: function that computes the fermionic hamiltonian **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False) >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554], >>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True) >>> mol = Molecule(symbols, geometry, alpha=alpha) >>> args = [alpha] >>> h = generate_fermionic_hamiltonian(mol)(*args) >>> h[0] array([ 1. , -1.39021927, 0.35721954, 0.08512072, 0.35721954, 0.35721954, 0.08512072, 0.08512072, 0.08512072, 0.35092658, 0.08512072, 0.08512072, 0.35092658, 0.35092658, -1.39021927, 0.35721954, 0.08512072, 0.08512072, 0.35092658, 0.35092658, 0.08512072, 0.35092658, 0.35092658, 0.08512072, 0.08512072, -0.29165331, 0.08512072, 0.36941834, 0.08512072, 0.08512072, 0.36941834, 0.36941834, 0.35092658, 0.08512072, -0.29165331, 0.08512072, 0.36941834]) """""" def fermionic_hamiltonian(*args): r""""""Compute the fermionic hamiltonian. 
Args: args (array[array[float]]): initial values of the differentiable parameters Returns: tuple(array[float], list[list[int]]): the Hamiltonian coefficients and operators """""" e_core, one, two = generate_electron_integrals(mol)(*args) e_core = anp.array([e_core]) indices_one = anp.argwhere(abs(one) >= cutoff) operators_one = (indices_one * 2).tolist() + ( indices_one * 2 + 1 ).tolist() # up-up + down-down terms coeffs_one = anp.tile(one[abs(one) >= cutoff], 2) indices_two = anp.argwhere(abs(two) >= cutoff) n = len(indices_two) operators_two = ( [(indices_two[i] * 2).tolist() for i in range(n)] # up-up-up-up term + [ (indices_two[i] * 2 + [0, 1, 1, 0]).tolist() for i in range(n) ] # up-down-down-up term + [ (indices_two[i] * 2 + [1, 0, 0, 1]).tolist() for i in range(n) ] # down-up-up-down term + [(indices_two[i] * 2 + 1).tolist() for i in range(n)] # down-down-down-down term ) coeffs_two = anp.tile(two[abs(two) >= cutoff], 4) / 2 coeffs = anp.concatenate((e_core, coeffs_one, coeffs_two)) operators = [[]] + operators_one + operators_two indices_sort = [operators.index(i) for i in sorted(operators)] return coeffs[indices_sort], sorted(operators) return fermionic_hamiltonian " 15685,"def legacy_supported_features( supported_features: int, supported_color_modes: list[str] | None ) -> int: """"""Calculate supported features with backwards compatibility."""""" # Backwards compatibility for supported_color_modes added in 2021.4 if supported_color_modes is None: supported_color_modes = [] if any(mode in supported_color_modes for mode in COLOR_MODES_COLOR): supported_features |= SUPPORT_COLOR if any(mode in supported_color_modes for mode in COLOR_MODES_BRIGHTNESS): supported_features |= SUPPORT_BRIGHTNESS if COLOR_MODE_COLOR_TEMP in supported_color_modes: supported_features |= SUPPORT_COLOR_TEMP return supported_features ","def legacy_supported_features( supported_features: int, supported_color_modes: list[str] | None ) -> int: """"""Calculate supported features with backwards compatibility."""""" # Backwards compatibility for supported_color_modes added in 2021.4 if supported_color_modes is None: return supported_features if any(mode in supported_color_modes for mode in COLOR_MODES_COLOR): supported_features |= SUPPORT_COLOR if any(mode in supported_color_modes for mode in COLOR_MODES_BRIGHTNESS): supported_features |= SUPPORT_BRIGHTNESS if COLOR_MODE_COLOR_TEMP in supported_color_modes: supported_features |= SUPPORT_COLOR_TEMP return supported_features " 887,"def sample(expr, condition=None, size=(), library='scipy', numsamples=1, seed=None, **kwargs): """""" A realization of the random expression Parameters ========== expr : Expression of random variables Expression from which sample is extracted condition : Expr containing RandomSymbols A conditional expression size : int, tuple Represents size of each sample in numsamples library : str - 'scipy' : Sample using scipy - 'numpy' : Sample using numpy - 'pymc3' : Sample using PyMC3 Choose any of the available options to sample from as string, by default is 'scipy' numsamples : int Number of samples, each with size as ``size`` seed : An object(like int, numpy.random.RandomState, numpy.random.default_rng) to be used as seed by the given external library for sampling `expr`. Optional, by default None, in which case seed settings related to the given library will be used. No modifications to environment's global seed settings are done by this argument. 
Examples ======== >>> from sympy.stats import Die, sample, Normal, Geometric >>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6) # Finite Random Variable >>> die_roll = sample(X + Y + Z) # doctest: +SKIP >>> next(die_roll) # doctest: +SKIP 6 >>> N = Normal('N', 3, 4) # Continuous Random Variable >>> samp = next(sample(N)) # doctest: +SKIP >>> samp in N.pspace.domain.set # doctest: +SKIP True >>> samp = next(sample(N, N>0)) # doctest: +SKIP >>> samp > 0 # doctest: +SKIP True >>> samp_list = next(sample(N, size=4)) # doctest: +SKIP >>> [sam in N.pspace.domain.set for sam in samp_list] # doctest: +SKIP [True, True, True, True] >>> G = Geometric('G', 0.5) # Discrete Random Variable >>> samp_list = next(sample(G, size=3)) # doctest: +SKIP >>> samp_list # doctest: +SKIP array([10, 4, 1]) >>> [sam in G.pspace.domain.set for sam in samp_list] # doctest: +SKIP [True, True, True] >>> MN = Normal(""MN"", [3, 4], [[2, 1], [1, 2]]) # Joint Random Variable >>> samp_list = next(sample(MN, size=4)) # doctest: +SKIP >>> samp_list # doctest: +SKIP array([[4.22564264, 3.23364418], [3.41002011, 4.60090908], [3.76151866, 4.77617143], [4.71440865, 2.65714157]]) >>> [tuple(sam) in MN.pspace.domain.set for sam in samp_list] # doctest: +SKIP [True, True, True, True] Returns ======= sample: iterator object iterator object containing the sample/samples of given expr """""" ### TODO: Remove the user warnings in the future releases message = (""The return type of sample has been changed to return an "" ""iterator object since version 1.7. For more information see "" ""https://github.com/sympy/sympy/issues/19061"") warnings.warn(filldedent(message)) return sample_iter(expr, condition, size=size, library=library, numsamples=numsamples, seed=seed) ","def sample(expr, condition=None, size=(), library='scipy', numsamples=1, seed=None, **kwargs): """""" A realization of the random expression Parameters ========== expr : Expression of random variables Expression from which sample is extracted condition : Expr containing RandomSymbols A conditional expression size : int, tuple Represents size of each sample in numsamples library : str - 'scipy' : Sample using scipy - 'numpy' : Sample using numpy - 'pymc3' : Sample using PyMC3 Choose any of the available options to sample from as string, by default is 'scipy' numsamples : int Number of samples, each with size as ``size`` seed : An object (like int, numpy.random.RandomState, numpy.random.default_rng) to be used as seed by the given external library for sampling `expr`. Optional, by default None, in which case seed settings related to the given library will be used. No modifications to environment's global seed settings are done by this argument. 
Examples ======== >>> from sympy.stats import Die, sample, Normal, Geometric >>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6) # Finite Random Variable >>> die_roll = sample(X + Y + Z) # doctest: +SKIP >>> next(die_roll) # doctest: +SKIP 6 >>> N = Normal('N', 3, 4) # Continuous Random Variable >>> samp = next(sample(N)) # doctest: +SKIP >>> samp in N.pspace.domain.set # doctest: +SKIP True >>> samp = next(sample(N, N>0)) # doctest: +SKIP >>> samp > 0 # doctest: +SKIP True >>> samp_list = next(sample(N, size=4)) # doctest: +SKIP >>> [sam in N.pspace.domain.set for sam in samp_list] # doctest: +SKIP [True, True, True, True] >>> G = Geometric('G', 0.5) # Discrete Random Variable >>> samp_list = next(sample(G, size=3)) # doctest: +SKIP >>> samp_list # doctest: +SKIP array([10, 4, 1]) >>> [sam in G.pspace.domain.set for sam in samp_list] # doctest: +SKIP [True, True, True] >>> MN = Normal(""MN"", [3, 4], [[2, 1], [1, 2]]) # Joint Random Variable >>> samp_list = next(sample(MN, size=4)) # doctest: +SKIP >>> samp_list # doctest: +SKIP array([[4.22564264, 3.23364418], [3.41002011, 4.60090908], [3.76151866, 4.77617143], [4.71440865, 2.65714157]]) >>> [tuple(sam) in MN.pspace.domain.set for sam in samp_list] # doctest: +SKIP [True, True, True, True] Returns ======= sample: iterator object iterator object containing the sample/samples of given expr """""" ### TODO: Remove the user warnings in the future releases message = (""The return type of sample has been changed to return an "" ""iterator object since version 1.7. For more information see "" ""https://github.com/sympy/sympy/issues/19061"") warnings.warn(filldedent(message)) return sample_iter(expr, condition, size=size, library=library, numsamples=numsamples, seed=seed) " 1700,"def plot_confusion_matrix(estimator, X, y_true, labels=None, sample_weight=None, normalize=None, display_labels=None, include_values=True, xticks_rotation='horizontal', values_format=None, cmap='viridis', ax=None): """"""Plot Confusion Matrix. Read more in the :ref:`User Guide `. Parameters ---------- estimator : estimator instance Trained classifier. X : {array-like, sparse matrix} of shape (n_samples, n_features) Input values. y : array-like of shape (n_samples,) Target values. labels : array-like of shape (n_classes,), default=None List of labels to index the matrix. This may be used to reorder or select a subset of labels. If `None` is given, those that appear at least once in `y_true` or `y_pred` are used in sorted order. sample_weight : array-like of shape (n_samples,), default=None Sample weights. normalize : {'true', 'pred', 'all'}, default=None Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. display_labels : array-like of shape (n_classes,), default=None Target names used for plotting. By default, `labels` will be used if it is defined, otherwise the unique labels of `y_true` and `y_pred` will be used. include_values : bool, default=True Includes values in confusion matrix. xticks_rotation : {'vertical', 'horizontal'} or float, \ default='horizontal' Rotation of xtick labels. values_format : str, default=None Format specification for values in confusion matrix. If `None`, the format specification is '.2g'. cmap : str or matplotlib Colormap, default='viridis' Colormap recognized by matplotlib. ax : matplotlib Axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. 
Returns ------- display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` Examples -------- >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import plot_confusion_matrix >>> from sklearn.model_selection import train_test_split >>> from sklearn.svm import SVC >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> clf = SVC(random_state=0) >>> clf.fit(X_train, y_train) >>> plot_confusion_matrix(clf, X_test, y_test) >>> plt.show() """""" check_matplotlib_support(""plot_confusion_matrix"") if not is_classifier(estimator): raise ValueError(""plot_confusion_matrix only supports classifiers"") y_pred = estimator.predict(X) cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, normalize=normalize) if display_labels is None: if labels is None: display_labels = estimator.classes_ else: display_labels = labels disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels) return disp.plot(include_values=include_values, cmap=cmap, ax=ax, xticks_rotation=xticks_rotation, values_format=values_format) ","def plot_confusion_matrix(estimator, X, y_true, labels=None, sample_weight=None, normalize=None, display_labels=None, include_values=True, xticks_rotation='horizontal', values_format=None, cmap='viridis', ax=None): """"""Plot Confusion Matrix. Read more in the :ref:`User Guide `. Parameters ---------- estimator : estimator instance Trained classifier. X : {array-like, sparse matrix} of shape (n_samples, n_features) Input values. y : array-like of shape (n_samples,) Target values. labels : array-like of shape (n_classes,), default=None List of labels to index the matrix. This may be used to reorder or select a subset of labels. If `None` is given, those that appear at least once in `y_true` or `y_pred` are used in sorted order. sample_weight : array-like of shape (n_samples,), default=None Sample weights. normalize : {'true', 'pred', 'all'}, default=None Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. display_labels : array-like of shape (n_classes,), default=None Target names used for plotting. By default, `labels` will be used if it is defined, otherwise the unique labels of `y_true` and `y_pred` will be used. include_values : bool, default=True Includes values in confusion matrix. xticks_rotation : {'vertical', 'horizontal'} or float, \ default='horizontal' Rotation of xtick labels. values_format : str, default=None Format specification for values in confusion matrix. If `None`, the format specification is '.2g'. cmap : str or matplotlib Colormap, default='viridis' Colormap recognized by matplotlib. ax : matplotlib Axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. Returns ------- display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` Examples -------- >>> import matplotlib.pyplot as plt # doctest: +SKIP >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import plot_confusion_matrix >>> from sklearn.model_selection import train_test_split >>> from sklearn.svm import SVC >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... 
X, y, random_state=0) >>> clf = SVC(random_state=0) >>> clf.fit(X_train, y_train) >>> plot_confusion_matrix(clf, X_test, y_test) >>> plt.show() """""" check_matplotlib_support(""plot_confusion_matrix"") if not is_classifier(estimator): raise ValueError(""plot_confusion_matrix only supports classifiers"") y_pred = estimator.predict(X) cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, normalize=normalize) if display_labels is None: if labels is None: display_labels = estimator.classes_ else: display_labels = labels disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels) return disp.plot(include_values=include_values, cmap=cmap, ax=ax, xticks_rotation=xticks_rotation, values_format=values_format) " 42415,"def crop_image(raster, geoms, all_touched=True): """"""Crop a single file using geometry objects. Parameters ---------- raster : rasterio.io.DatasetReader object The rasterio object to be cropped. geoms : geopandas geodataframe or list of polygons The spatial polygon boundaries in GeoJSON-like dict format to be used to crop the image. All data outside of the polygon boundaries will be set to nodata and/or removed from the image. all_touched : bool (default=True) Include a pixel in the mask if it touches any of the shapes. If False, include a pixel only if its center is within one of the shapes, or if it is selected by Bresenham's line algorithm. (from rasterio) Returns ---------- tuple out_image: cropped numpy array A numpy array that is cropped to the geoms object extent with shape (bands, rows, columns) out_meta: dict A dictionary containing updated metadata for the cropped raster, including extent (shape elements) and transform properties. Example ------- >>> import geopandas as gpd >>> import rasterio as rio >>> import earthpy.spatial as es >>> from earthpy.io import path_to_example >>> # Clip an RGB image to the extent of Rocky Mountain National Park >>> rmnp = gpd.read_file(path_to_example(""rmnp.shp"")) >>> with rio.open(path_to_example(""rmnp-rgb.tif"")) as src: ... in_image = src.read() ... out_image, out_meta = es.crop_image(src, rmnp) >>> in_image.shape (3, 373, 485) >>> out_image.shape (3, 265, 281) """""" if isinstance(geoms, gpd.geodataframe.GeoDataFrame): clip_extent = [extent_to_json(geoms)] else: clip_extent = geoms out_image, out_transform = mask( raster, clip_extent, crop=True, all_touched=all_touched ) out_meta = raster.meta.copy() out_meta.update( { ""driver"": ""GTiff"", ""height"": out_image.shape[1], ""width"": out_image.shape[2], ""transform"": out_transform, } ) return out_image, out_meta ","def crop_image(raster, geoms, all_touched=True): """"""Crop a single file using geometry objects. Parameters ---------- raster : rasterio.io.DatasetReader object The rasterio object to be cropped. geoms : geopandas geodataframe or list of polygons The spatial polygon boundaries in GeoJSON-like dict format to be used to crop the image. All data outside of the polygon boundaries will be set to nodata and/or removed from the image. all_touched : bool (default=True) Include a pixel in the mask if it touches any of the shapes. If False, include a pixel only if its center is within one of the shapes, or if it is selected by Bresenham's line algorithm. 
(from rasterio) Returns ---------- tuple out_image: cropped numpy array A numpy array that is cropped to the geoms object extent with shape (bands, rows, columns) out_meta: dict A dictionary containing updated metadata for the cropped raster, including extent (shape elements) and transform properties. Example ------- >>> import geopandas as gpd >>> import rasterio as rio >>> import earthpy.spatial as es >>> from earthpy.io import path_to_example >>> # Clip an RGB image to the extent of Rocky Mountain National Park >>> rmnp = gpd.read_file(path_to_example(""rmnp.shp"")) >>> with rio.open(path_to_example(""rmnp-rgb.tif"")) as src_raster: ... in_image = src.read() ... out_image, out_meta = es.crop_image(src, rmnp) >>> in_image.shape (3, 373, 485) >>> out_image.shape (3, 265, 281) """""" if isinstance(geoms, gpd.geodataframe.GeoDataFrame): clip_extent = [extent_to_json(geoms)] else: clip_extent = geoms out_image, out_transform = mask( raster, clip_extent, crop=True, all_touched=all_touched ) out_meta = raster.meta.copy() out_meta.update( { ""driver"": ""GTiff"", ""height"": out_image.shape[1], ""width"": out_image.shape[2], ""transform"": out_transform, } ) return out_image, out_meta " 29704,"def _terminate_process(proc: subprocess.Popen): if proc.poll() is None: if sys.platform.startswith(""win""): proc.send_signal(signal.CTRL_BREAK_EVENT) else: proc.send_signal(signal.SIGINT) try: proc.communicate(timeout=30) finally: # Make sure we don't leave the process lingering around with suppress(OSError): proc.kill() ","def _terminate_process(proc: subprocess.Popen) -> None: if proc.poll() is None: if sys.platform.startswith(""win""): proc.send_signal(signal.CTRL_BREAK_EVENT) else: proc.send_signal(signal.SIGINT) try: proc.communicate(timeout=30) finally: # Make sure we don't leave the process lingering around with suppress(OSError): proc.kill() " 25142,"def _looks_like_typedDict( # pylint: disable=invalid-name node: nodes.FunctionDef, ) -> bool: """"""Check if node is TypedDict FunctionDef."""""" if isinstance(node, nodes.FunctionDef) and node.name == ""TypedDict"": return True return False ","def _looks_like_typedDict( # pylint: disable=invalid-name node: nodes.FunctionDef, ) -> bool: """"""Check if node is TypedDict FunctionDef."""""" return isinstance(node, nodes.FunctionDef) and node.name == ""TypedDict"" " 52753,"def rec_iter(zeek_parser, sensor, ignore_rules): for line in zeek_parser: line[""timestamp""] = line.pop(""ts"") # skip PassiveRecon:: line[""recon_type""] = line[""recon_type""][14:] for rec in ivre.passive.handle_rec( sensor, ignore_rules.get('IGNORENETS', {}), ignore_rules.get('NEVERIGNORE', {}), **line ): yield rec ","def rec_iter(zeek_parser, sensor, ignore_rules): for line in zeek_parser: line[""timestamp""] = line.pop(""ts"") # skip PassiveRecon:: line[""recon_type""] = line[""recon_type""][14:] yield from ivre.passive.handle_rec( sensor, ignore_rules.get('IGNORENETS', {}), ignore_rules.get('NEVERIGNORE', {}), **line ) " 56237,"def render_routine(line): """"""Function for rendering single formula Args: line (tuple): formula idx, formula string, path to store rendered image """""" formula, file_idx, folder_path = line output_path = Path(folder_path, file_idx) pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_') formula = preprocess_formula(formula) if not output_path.exists(): tex_filename = Path(folder_path, pre_name + '.tex') log_filename = tex_filename.with_name(pre_name + '.log') aux_filename = tex_filename.with_name(pre_name + '.aux') with 
open(str(tex_filename), ""w"") as w: w.write(template % formula) subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)], check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') for filename in (tex_filename, log_filename, aux_filename): if filename.exists(): filename.unlink() pdf_filename = tex_filename.with_name(pre_name + '.pdf') png_filename = tex_filename.with_name(pre_name + '.png') if not pdf_filename.exists(): print_info('ERROR: {} cannot compile\n'.format(file_idx)) else: subprocess.run(['convert', '+profile', '""icc""', '-density', '200', '-quality', '100', str(pdf_filename), str(png_filename)], check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') if pdf_filename.exists(): pdf_filename.unlink() if png_filename.exists(): crop_image(str(png_filename), str(output_path)) png_filename.unlink() else: print_info(""ERROR: {png_filename} does not exists"".format(png_filename=png_filename)) ","def render_routine(line): """"""Function for rendering single formula Args: line (tuple): formula idx, formula string, path to store rendered image """""" formula, file_idx, folder_path = line output_path = Path(folder_path, file_idx) pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_') formula = preprocess_formula(formula) if not output_path.exists(): tex_filename = Path(folder_path, pre_name + '.tex') log_filename = tex_filename.with_name(pre_name + '.log') aux_filename = tex_filename.with_name(pre_name + '.aux') with open(str(tex_filename), ""w"") as w: w.write(template % formula) subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)], check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') for filename in (tex_filename, log_filename, aux_filename): if filename.exists(): filename.unlink() pdf_filename = tex_filename.with_suffix('.pdf') png_filename = tex_filename.with_suffix('.png') if not pdf_filename.exists(): print_info('ERROR: {} cannot compile\n'.format(file_idx)) else: subprocess.run(['convert', '+profile', '""icc""', '-density', '200', '-quality', '100', str(pdf_filename), str(png_filename)], check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') if pdf_filename.exists(): pdf_filename.unlink() if png_filename.exists(): crop_image(str(png_filename), str(output_path)) png_filename.unlink() else: print_info(""ERROR: {png_filename} does not exists"".format(png_filename=png_filename)) " 29500,"def user_activity_link(email: str) -> mark_safe: url = reverse(get_user_activity, kwargs=dict(email=email)) email_link = escape(f'{email}') return mark_safe(email_link) ","def user_activity_link(email: str) -> mark_safe: url = reverse(get_user_activity, kwargs=dict(email=email)) email_link = f'{escape(email)}' return mark_safe(email_link) " 20071,"def setup_resources(api): resources_endpoints = { 'Blueprints': 'blueprints', 'BlueprintsId': 'blueprints/', 'BlueprintsIdValidate': 'blueprints//validate', 'BlueprintsIdArchive': 'blueprints//archive', 'BlueprintsSetGlobal': 'blueprints//set-global', 'BlueprintsSetVisibility': 'blueprints//' 'set-visibility', 'Snapshots': 'snapshots', 'SnapshotsId': 'snapshots/', 'SnapshotsIdArchive': 'snapshots//archive', 'SnapshotsIdRestore': 'snapshots//restore', 'SnapshotsStatus': 'snapshot-status', 'Executions': 'executions', 'ExecutionsId': 'executions/', 'Deployments': 'deployments', 'DeploymentsId': 'deployments/', 'DeploymentsSetSite': 'deployments//set-site', 'DeploymentsIdOutputs': 'deployments//outputs', 
'DeploymentsIdCapabilities': 'deployments//capabilities', 'DeploymentsSetVisibility': 'deployments//' 'set-visibility', 'InterDeploymentDependencies': 'deployments/inter-deployment-dependencies', 'DeploymentModifications': 'deployment-modifications', 'DeploymentModificationsId': 'deployment-modifications/' '', 'DeploymentModificationsIdFinish': 'deployment-modifications/' '/finish', 'DeploymentModificationsIdRollback': 'deployment-modifications/' '/' 'rollback', 'Nodes': 'nodes', 'NodeInstances': 'node-instances', 'NodeInstancesId': 'node-instances/', 'Events': 'events', 'Status': 'status', 'ProviderContext': 'provider/context', 'Version': 'version', 'EvaluateFunctions': 'evaluate/functions', 'Tokens': 'tokens', 'Plugins': 'plugins', 'PluginsId': 'plugins/', 'PluginsUpdate': 'plugins-updates//update/', 'PluginsUpdateId': 'plugins-updates/', 'PluginsUpdates': 'plugins-updates', 'PluginsArchive': 'plugins//archive', 'PluginsSetGlobal': 'plugins//set-global', 'PluginsSetVisibility': 'plugins//set-visibility', 'MaintenanceMode': 'maintenance', 'MaintenanceModeAction': 'maintenance/', 'DeploymentUpdate': 'deployment-updates//update/', 'DeploymentUpdateId': 'deployment-updates/', 'DeploymentUpdates': 'deployment-updates', 'Tenants': 'tenants', 'TenantsId': 'tenants/', 'TenantUsers': 'tenants/users', 'TenantGroups': 'tenants/user-groups', 'UserGroups': 'user-groups', 'UserGroupsId': 'user-groups/', 'UserGroupsUsers': 'user-groups/users', 'User': 'user', 'Users': 'users', 'UsersId': 'users/', 'UsersActive': 'users/active/', 'UsersUnlock': 'users/unlock/', 'FileServerAuth': 'file-server-auth', 'LdapAuthentication': 'ldap', 'SSLConfig': 'ssl', 'Secrets': 'secrets', 'SecretsExport': 'secrets/share/export', 'SecretsImport': 'secrets/share/import', 'SecretsKey': 'secrets/', 'SecretsSetGlobal': 'secrets//set-global', 'SecretsSetVisibility': 'secrets//set-visibility', 'ManagerConfig': 'config', 'ManagerConfigId': 'config/', 'Managers': 'managers', 'ManagersId': 'managers/', 'Agents': 'agents', 'AgentsName': 'agents/', 'SummarizeDeployments': 'summary/deployments', 'SummarizeNodes': 'summary/nodes', 'SummarizeNodeInstances': 'summary/node_instances', 'SummarizeExecutions': 'summary/executions', 'SummarizeBlueprints': 'summary/blueprints', 'UserTokens': 'user-tokens/', 'Operations': 'operations', 'OperationsId': 'operations/', 'TasksGraphs': 'tasks_graphs', 'TasksGraphsId': 'tasks_graphs/', 'ExecutionsCheck': 'executions//should-start', 'RabbitMQBrokers': 'brokers', 'DBNodes': 'db-nodes', 'RabbitMQBrokersId': 'brokers/', 'License': 'license', 'Sites': 'sites', 'SitesName': 'sites/', 'ClusterStatus': 'cluster-status', 'DeploymentsLabels': 'labels/deployments', 'DeploymnetsLabelsKey': 'labels/deployments/' } # Set version endpoint as a non versioned endpoint api.add_resource(resources_v1.Version, '/api/version', endpoint='version') for resource, endpoint_suffix in resources_endpoints.items(): _set_versioned_urls(api, resource, endpoint_suffix) ","def setup_resources(api): resources_endpoints = { 'Blueprints': 'blueprints', 'BlueprintsId': 'blueprints/', 'BlueprintsIdValidate': 'blueprints//validate', 'BlueprintsIdArchive': 'blueprints//archive', 'BlueprintsSetGlobal': 'blueprints//set-global', 'BlueprintsSetVisibility': 'blueprints//' 'set-visibility', 'Snapshots': 'snapshots', 'SnapshotsId': 'snapshots/', 'SnapshotsIdArchive': 'snapshots//archive', 'SnapshotsIdRestore': 'snapshots//restore', 'SnapshotsStatus': 'snapshot-status', 'Executions': 'executions', 'ExecutionsId': 'executions/', 'Deployments': 
'deployments', 'DeploymentsId': 'deployments/', 'DeploymentsSetSite': 'deployments//set-site', 'DeploymentsIdOutputs': 'deployments//outputs', 'DeploymentsIdCapabilities': 'deployments//capabilities', 'DeploymentsSetVisibility': 'deployments//' 'set-visibility', 'InterDeploymentDependencies': 'deployments/inter-deployment-dependencies', 'DeploymentModifications': 'deployment-modifications', 'DeploymentModificationsId': 'deployment-modifications/' '', 'DeploymentModificationsIdFinish': 'deployment-modifications/' '/finish', 'DeploymentModificationsIdRollback': 'deployment-modifications/' '/' 'rollback', 'Nodes': 'nodes', 'NodeInstances': 'node-instances', 'NodeInstancesId': 'node-instances/', 'Events': 'events', 'Status': 'status', 'ProviderContext': 'provider/context', 'Version': 'version', 'EvaluateFunctions': 'evaluate/functions', 'Tokens': 'tokens', 'Plugins': 'plugins', 'PluginsId': 'plugins/', 'PluginsUpdate': 'plugins-updates//update/', 'PluginsUpdateId': 'plugins-updates/', 'PluginsUpdates': 'plugins-updates', 'PluginsArchive': 'plugins//archive', 'PluginsSetGlobal': 'plugins//set-global', 'PluginsSetVisibility': 'plugins//set-visibility', 'MaintenanceMode': 'maintenance', 'MaintenanceModeAction': 'maintenance/', 'DeploymentUpdate': 'deployment-updates//update/', 'DeploymentUpdateId': 'deployment-updates/', 'DeploymentUpdates': 'deployment-updates', 'Tenants': 'tenants', 'TenantsId': 'tenants/', 'TenantUsers': 'tenants/users', 'TenantGroups': 'tenants/user-groups', 'UserGroups': 'user-groups', 'UserGroupsId': 'user-groups/', 'UserGroupsUsers': 'user-groups/users', 'User': 'user', 'Users': 'users', 'UsersId': 'users/', 'UsersActive': 'users/active/', 'UsersUnlock': 'users/unlock/', 'FileServerAuth': 'file-server-auth', 'LdapAuthentication': 'ldap', 'SSLConfig': 'ssl', 'Secrets': 'secrets', 'SecretsExport': 'secrets/share/export', 'SecretsImport': 'secrets/share/import', 'SecretsKey': 'secrets/', 'SecretsSetGlobal': 'secrets//set-global', 'SecretsSetVisibility': 'secrets//set-visibility', 'ManagerConfig': 'config', 'ManagerConfigId': 'config/', 'Managers': 'managers', 'ManagersId': 'managers/', 'Agents': 'agents', 'AgentsName': 'agents/', 'SummarizeDeployments': 'summary/deployments', 'SummarizeNodes': 'summary/nodes', 'SummarizeNodeInstances': 'summary/node_instances', 'SummarizeExecutions': 'summary/executions', 'SummarizeBlueprints': 'summary/blueprints', 'UserTokens': 'user-tokens/', 'Operations': 'operations', 'OperationsId': 'operations/', 'TasksGraphs': 'tasks_graphs', 'TasksGraphsId': 'tasks_graphs/', 'ExecutionsCheck': 'executions//should-start', 'RabbitMQBrokers': 'brokers', 'DBNodes': 'db-nodes', 'RabbitMQBrokersId': 'brokers/', 'License': 'license', 'Sites': 'sites', 'SitesName': 'sites/', 'ClusterStatus': 'cluster-status', 'DeploymentsLabels': 'labels/deployments', 'DeploymentsLabelsKey': 'labels/deployments/' } # Set version endpoint as a non versioned endpoint api.add_resource(resources_v1.Version, '/api/version', endpoint='version') for resource, endpoint_suffix in resources_endpoints.items(): _set_versioned_urls(api, resource, endpoint_suffix) " 37839,"def test_clib_name(): ""Make sure we get the correct library name for different OS names"" for linux in [""linux"", ""linux2"", ""linux3""]: assert clib_name(linux) == ""libgmt.so"" assert clib_name(""darwin"") == ""libgmt.dylib"" assert clib_name(""win32"", True) == ""gmt_w64.dll"" assert clib_name(""win32"", False) == ""gmt_w32.dll"" with pytest.raises(GMTOSError): clib_name(""meh"") ","def test_clib_name(): ""Make 
sure we get the correct library name for different OS names"" for linux in [""linux"", ""linux2"", ""linux3""]: assert clib_name(linux) == ""libgmt.so"" assert clib_name(""darwin"") == ""libgmt.dylib"" assert clib_name(""win32"", is_64bit=True) == ""gmt_w64.dll"" assert clib_name(""win32"", False) == ""gmt_w32.dll"" with pytest.raises(GMTOSError): clib_name(""meh"") " 912,"def multiset_permutations(m, size=None, g=None): """""" Return the unique permutations of multiset ``m``. Examples ======== >>> from sympy.utilities.iterables import multiset_permutations >>> from sympy import factorial >>> [''.join(i) for i in multiset_permutations('aab')] ['aab', 'aba', 'baa'] >>> factorial(len('banana')) 720 >>> len(list(multiset_permutations('banana'))) 60 """""" if g is None: if type(m) is dict: g = [[k, m[k]] for k in ordered(m)] else: m = list(ordered(m)) g = [list(i) for i in group(m, multiple=False)] del m do = [gi for gi in g if gi[1] > 0] SUM = sum([gi[1] for gi in do]) if not do or size is not None and (size > SUM or size < 1): if not do and size is None or size < 1: yield [] return elif size == 1: for k, v in do: yield [k] elif len(do) == 1: k, v = do[0] v = v if size is None else (size if size <= v else 0) yield [k for i in range(v)] elif all(v == 1 for k, v in do): for p in permutations([k for k, v in do], size): yield list(p) else: size = size if size is not None else SUM for i, (k, v) in enumerate(do): do[i][1] -= 1 for j in multiset_permutations(None, size - 1, do): if j: yield [k] + j do[i][1] += 1 ","def multiset_permutations(m, size=None, g=None): """""" Return the unique permutations of multiset ``m``. Examples ======== >>> from sympy.utilities.iterables import multiset_permutations >>> from sympy import factorial >>> [''.join(i) for i in multiset_permutations('aab')] ['aab', 'aba', 'baa'] >>> factorial(len('banana')) 720 >>> len(list(multiset_permutations('banana'))) 60 """""" if g is None: if type(m) is dict: g = [[k, m[k]] for k in ordered(m)] else: m = list(ordered(m)) g = [list(i) for i in group(m, multiple=False)] del m do = [gi for gi in g if gi[1] > 0] SUM = sum([gi[1] for gi in do]) if not do or size is not None and (size > SUM or size < 1): if not do and size is None or size == 0: yield [] return elif size == 1: for k, v in do: yield [k] elif len(do) == 1: k, v = do[0] v = v if size is None else (size if size <= v else 0) yield [k for i in range(v)] elif all(v == 1 for k, v in do): for p in permutations([k for k, v in do], size): yield list(p) else: size = size if size is not None else SUM for i, (k, v) in enumerate(do): do[i][1] -= 1 for j in multiset_permutations(None, size - 1, do): if j: yield [k] + j do[i][1] += 1 " 32094,"def set_next_fetch_run(last_run, incidents, fetch_limit, start_fetch_time, end_fetch_time, look_back, created_time_field, id_field='id', date_format='%Y-%m-%dT%H:%M:%S', save_incidents_in_last_run=False, increase_last_run_time=False): """""" Sets the next run :type last_run: ``dict`` :param last_run: The LastRun object :type incidents: ``list`` :param incidents: List of incidents :type fetch_limit: ``int`` :param fetch_limit: The fetch limit :type start_fetch_time: ``str`` :param start_fetch_time: The start time to fetch :type end_fetch_time: ``str`` :param end_fetch_time: The end time to fetch :type look_back: ``int`` :param look_back: The time to look back in fetch in minutes :type created_time_field: ``str`` :param created_time_field: The incident created time field :type id_field: ``str`` :param id_field: The incident id field :type date_format: 
``str`` :param date_format: The date format :type save_incidents_in_last_run: ``bool`` :param save_incidents_in_last_run: Whether to incidents in the last run object :type increase_last_run_time: ``bool`` :param increase_last_run_time: Whether to increase the last run time with one millisecond :return: The new last run object and list of incidents :rtype: ``Tuple`` """""" found_incidents = last_run.get('found_incident_ids', {}) current_time = int(time.time()) incidents_from_limit = incidents[fetch_limit:] incidents = incidents[:fetch_limit] for incident in incidents: found_incidents[incident[id_field]] = current_time found_incidents = remove_old_incidents_ids(found_incidents, current_time, look_back) if len(incidents) == 0: new_last_run = { 'time': end_fetch_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } elif len(incidents) < fetch_limit or look_back == 0: latest_incident_fetched_time = get_latest_incident_created_time(incidents, created_time_field, date_format, increase_last_run_time) new_last_run = { 'time': latest_incident_fetched_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } else: new_last_run = { 'time': start_fetch_time, 'limit': last_run.get('limit') + fetch_limit, 'found_incident_ids': found_incidents } if save_incidents_in_last_run: new_last_run['incidents'] = incidents_from_limit return new_last_run, incidents ","def set_next_fetch_run(last_run, incidents, fetch_limit, start_fetch_time, end_fetch_time, look_back, created_time_field, id_field='id', date_format='%Y-%m-%dT%H:%M:%S', save_incidents_in_last_run=False, increase_last_run_time=False): """""" Sets the next run :type last_run: ``dict`` :param last_run: The LastRun object :type incidents: ``list`` :param incidents: List of incidents :type fetch_limit: ``int`` :param fetch_limit: The fetch limit :type start_fetch_time: ``str`` :param start_fetch_time: The start time to fetch :type end_fetch_time: ``str`` :param end_fetch_time: The end time to fetch :type look_back: ``int`` :param look_back: The time to look back in fetch in minutes :type created_time_field: ``str`` :param created_time_field: The incident created time field :type id_field: ``str`` :param id_field: The incident id field :type date_format: ``str`` :param date_format: The date format :type save_incidents_in_last_run: ``bool`` :param save_incidents_in_last_run: Whether to incidents in the last run object :type increase_last_run_time: ``bool`` :param increase_last_run_time: Whether to increase the last run time with one millisecond :return: The new last run object and list of incidents :rtype: ``Tuple`` """""" found_incidents = last_run.get('found_incident_ids', {}) current_time = int(time.time()) incidents_from_limit = incidents[fetch_limit:] incidents = incidents[:fetch_limit] for incident in incidents: found_incidents[incident[id_field]] = current_time found_incidents = remove_old_incidents_ids(found_incidents, current_time, look_back) if len(incidents) == 0: new_last_run = { 'time': end_fetch_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } elif len(incidents) < fetch_limit or look_back == 0: latest_incident_fetched_time = get_latest_incident_created_time(incidents, created_time_field, date_format, increase_last_run_time) new_last_run = { 'time': latest_incident_fetched_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } else: new_last_run = { 'time': start_fetch_time, 'limit': last_run.get('limit') + fetch_limit, 'found_incident_ids': found_incidents } if save_incidents_in_last_run: 
new_last_run['remained_incidents'] = incidents_from_limit return new_last_run, incidents " 12962,"def test_query_products_query_with_filter_ids( staff_api_client, product, query_products_with_filter ): product_global_id = graphene.Node.to_global_id(""Product"", product.id) variables = {""filter"": {""ids"": [product_global_id]}} response = staff_api_client.post_graphql( query_products_with_filter, variables, check_no_permissions=False ) content = get_graphql_content(response) products_data = content[""data""][""products""][""edges""] assert len(products_data) == 1 assert products_data[0][""node""][""id""] == product_global_id ","def test_query_products_query_with_filter_ids( api_client, product, query_products_with_filter ): product_global_id = graphene.Node.to_global_id(""Product"", product.id) variables = {""filter"": {""ids"": [product_global_id]}} response = staff_api_client.post_graphql( query_products_with_filter, variables, check_no_permissions=False ) content = get_graphql_content(response) products_data = content[""data""][""products""][""edges""] assert len(products_data) == 1 assert products_data[0][""node""][""id""] == product_global_id " 6437,"def get_context(context): context.no_cache = 1 context.align_greeting = '' setting = frappe.get_doc(""Support Settings"") context.greeting_title = setting.greeting_title context.greeting_subtitle = setting.greeting_subtitle # Support content context.favorite_article_list=[] context.help_article_list=[] context.category_list = frappe.get_all(""Help Category"", fields=""name"") favorite_articles = get_favorite_articles_by_page_view() if not favorite_articles or len(favorite_articles) < 3: favorite_articles = get_latest_articles() set_favorite_articles(context, favorite_articles) set_help_article_list(context) ","def get_context(context): context.no_cache = 1 context.align_greeting = '' setting = frappe.get_doc(""Support Settings"") context.greeting_title = setting.greeting_title context.greeting_subtitle = setting.greeting_subtitle # Support content context.favorite_article_list=[] context.help_article_list=[] context.category_list = frappe.get_all(""Help Category"", fields=""name"") favorite_articles = get_favorite_articles_by_page_view() if not favorite_articles: favorite_articles = get_latest_articles() set_favorite_articles(context, favorite_articles) set_help_article_list(context) " 41486,"def plot_results(ax, mutests, tests, test_size=0.05): cls_obs = np.array([test[0] for test in tests]).flatten() cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)] ax.plot(mutests, cls_obs, c='k') for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']): ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed') ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='y') ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='g') ax.plot(mutests, [test_size] * len(mutests), c='r') ax.set_ylim(0, 1) ","def plot_results(ax, mutests, tests, test_size=0.05): cls_obs = np.array([test[0] for test in tests]).flatten() cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)] ax.plot(mutests, cls_obs, c='k') for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']): ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed') ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='y') ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green') ax.plot(mutests, [test_size] * len(mutests), c='r') ax.set_ylim(0, 1) " 49000,"def test_dag_import(): """"""Test 
that the DAG file can be successfully imported. This tests that the DAG can be parsed, but does not run it in an Airflow environment. This is a recommended confidence check by the official Airflow docs: https://airflow.incubator.apache.org/tutorial.html#testing """""" from . import s3togcsoperator_tutorial as module internal_unit_testing.assert_has_valid_dag(module) ","def test_dag_import(): """"""Test that the DAG file can be successfully imported. This tests that the DAG can be parsed, but does not run it in an Airflow environment. This is a recommended confidence check by the official Airflow docs: https://airflow.incubator.apache.org/tutorial.html#testing """""" import s3togcsoperator_tutorial internal_unit_testing.assert_has_valid_dag(s3togcsoperator_tutorial) " 8959,"def find_directory_plugins(directory): """"""List plugins from a ``directory``. :param str directory: directory path to search :return: yield instance of :class:`~.handlers.PyFilePlugin` found in ``directory`` This function looks for single file and folder plugins in a directory. """""" for _, abspath in _list_plugin_filenames(directory): yield handlers.PyFilePlugin(abspath) ","def find_directory_plugins(directory): """"""List plugins from a ``directory``. :param str directory: directory path to search :return: yield instances of :class:`~.handlers.PyFilePlugin` found in ``directory`` This function looks for single file and folder plugins in a directory. """""" for _, abspath in _list_plugin_filenames(directory): yield handlers.PyFilePlugin(abspath) " 25567,"def register(client: GMatrixClient, signer: Signer, base_username: str) -> User: """""" Register a new random userid with the chosen Matrix server. """""" server_url = client.api.base_url server_name = urlparse(server_url).netloc # A deterministic userid cannot be used since that would be allow for an # DoS attack, were an attacker registers the userid before the real user. # To fix this a random number is added to the username. username = f""{base_username}.{Random().randint(0, 0xffffffff):08x}"" password = encode_hex(signer.sign(server_name.encode())) # Register will call sync internally, however, since this is a new account # and therefore it has no existing events, it is not necessary to set the # sync limit. client.register_with_password(username, password) signature_bytes = signer.sign(client.user_id.encode()) signature_hex = encode_hex(signature_bytes) user = client.get_user(client.user_id) user.set_display_name(signature_hex) log.debug( ""Matrix new user regsitered"", homeserver=server_name, server_url=server_url, username=username, ) return user ","def register(client: GMatrixClient, signer: Signer, base_username: str) -> User: """""" Register a new random userid with the chosen Matrix server. """""" server_url = client.api.base_url server_name = urlparse(server_url).netloc # A deterministic userid cannot be used since that would be allow for an # DoS attack, where an attacker registers the userid before the real user. # To fix this a random number is added to the username. username = f""{base_username}.{Random().randint(0, 0xffffffff):08x}"" password = encode_hex(signer.sign(server_name.encode())) # Register will call sync internally, however, since this is a new account # and therefore it has no existing events, it is not necessary to set the # sync limit. 
client.register_with_password(username, password) signature_bytes = signer.sign(client.user_id.encode()) signature_hex = encode_hex(signature_bytes) user = client.get_user(client.user_id) user.set_display_name(signature_hex) log.debug( ""Matrix new user regsitered"", homeserver=server_name, server_url=server_url, username=username, ) return user " 35034,"def matmul(data, weight, units=None, out_dtype="""", data_transposed=False, weight_transposed=False): """"""Dense operator. Applies a linear transformation. The X & W can be transposed. .. math:: `Y = X * W` Parameters ---------- data : tvm.relay.Expr The input data to the operator, of shape `(d_1, d_2, ..., d_n, units_in)` or `(d_1, d_2, ..., units_in, d_n)`. weight : tvm.relay.Expr The weight expressions, 2-D matrix, of shape `(units_in, units)` or `(units, units_in)`. units : int, optional Number of hidden units of the dense transformation. out_dtype : str, optional Specifies the output data type for mixed precision dense, of shape `(d_1, d_2, ..., d_n, units)`. data_transposed : bool, optional Whether the data tensor is in transposed format. weight_transposed : bool, optional Whether the weight tensor is in transposed format. Returns ------- result : tvm.relay.Expr The computed result. """""" return _make.matmul(data, weight, units, out_dtype, data_transposed, weight_transposed) ","def matmul(data, weight, units=None, out_dtype="""", data_transposed=False, weight_transposed=False): """"""Matmul operator. Applies a linear transformation. The X & W can be transposed. .. math:: `Y = X * W` Parameters ---------- data : tvm.relay.Expr The input data to the operator, of shape `(d_1, d_2, ..., d_n, units_in)` or `(d_1, d_2, ..., units_in, d_n)`. weight : tvm.relay.Expr The weight expressions, 2-D matrix, of shape `(units_in, units)` or `(units, units_in)`. units : int, optional Number of hidden units of the dense transformation. out_dtype : str, optional Specifies the output data type for mixed precision dense, of shape `(d_1, d_2, ..., d_n, units)`. data_transposed : bool, optional Whether the data tensor is in transposed format. weight_transposed : bool, optional Whether the weight tensor is in transposed format. Returns ------- result : tvm.relay.Expr The computed result. 
"""""" return _make.matmul(data, weight, units, out_dtype, data_transposed, weight_transposed) " 32563,"def main(): # Grab 'data' from Demisto Arguments data = demisto.args()['data'] # Encode the data, ignoring characters try: encoded_data = data.encode('ascii', 'ignore').decode(""utf-8"") except Exception as e: return_error(f'Failed to encoding the data.\nError:\n{str(e)}') # Output the data and add results to war room return_results(CommandResults( readable_output=f'Success: {encoded_data}', outputs_prefix='asciiencode.encoded', outputs=encoded_data)) ","def main(): # Grab 'data' from Demisto Arguments data = demisto.args()['data'] # Encode the data, ignoring characters try: encoded_data = data.encode('ascii', 'ignore').decode(""utf-8"") except Exception as e: return_error(f'Failed to encode the data.\nError:\n{str(e)}') # Output the data and add results to war room return_results(CommandResults( readable_output=f'Success: {encoded_data}', outputs_prefix='asciiencode.encoded', outputs=encoded_data)) " 31278,"def run_script_command(client: Client, args: Dict) -> Tuple[str, Any, Any]: script_uid = args.get('script_uid') endpoint_ids = argToList(args.get('endpoint_ids')) try: timeout = int(args.get('timeout', 600)) except ValueError: raise ValueError('The timeout argument need to be an integer.') try: parameters = json.loads(args.get('parameters', '{}')) except json.decoder.JSONDecodeError as e: raise ValueError(f'The parameters argument is not in valid JSON structure. {e}') response = client.run_script(script_uid, endpoint_ids, parameters, timeout) reply = response.get('reply') return ( tableToMarkdown('Run Script', reply), { f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun(val.action_id == obj.action_id)': reply }, response ) ","def run_script_command(client: Client, args: Dict) -> Tuple[str, Any, Any]: script_uid = args.get('script_uid') endpoint_ids = argToList(args.get('endpoint_ids')) try: timeout = int(args.get('timeout', 600)) except ValueError: raise ValueError('The timeout argument need to be an integer.') try: parameters = json.loads(args.get('parameters', '{}')) except json.decoder.JSONDecodeError as e: raise ValueError(f'The parameters argument is not in a valid JSON structure:\n{e}') response = client.run_script(script_uid, endpoint_ids, parameters, timeout) reply = response.get('reply') return ( tableToMarkdown('Run Script', reply), { f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun(val.action_id == obj.action_id)': reply }, response ) " 23136,"def test_getnanos_deprecated(): with pytest.warns(FutureWarning, match=""getnanos is deprecated""): getnanos(None) ","def test_getnanos_deprecated(): with pytest.warns(FutureWarning, match=""getnanos was deprecated""): getnanos(None) " 5748,"def linregress(x, y=None, alternative='two-sided'): """""" Calculate a linear least-squares regression for two sets of measurements. Parameters ---------- x, y : array_like Two sets of measurements. Both arrays should have the same length. If only `x` is given (and ``y=None``), then it must be a two-dimensional array where one dimension has length 2. The two sets of measurements are then found by splitting the array along the length-2 dimension. In the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is equivalent to ``linregress(x[0], x[1])``. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. Default is 'two-sided'. 
The following options are available: * 'two-sided': the slope of the regression line is nonzero * 'less': the slope of the regression line is less than zero * 'greater': the slope of the regression line is greater than zero .. versionadded:: 1.7.0 Returns ------- result : ``LinregressResult`` instance The return value is an object with the following attributes: slope : float Slope of the regression line. intercept : float Intercept of the regression line. rvalue : float The Pearson correlation coefficient. The square of ``rvalue`` is equal to the coefficient of determination. pvalue : float The p-value for a hypothesis test whose null hypothesis is that the slope is zero, using Wald Test with t-distribution of the test statistic. See `alternative` above for alternative hypotheses. stderr : float Standard error of the estimated slope (gradient), under the assumption of residual normality. intercept_stderr : float Standard error of the estimated intercept, under the assumption of residual normality. See Also -------- scipy.optimize.curve_fit : Use non-linear least squares to fit a function to data. scipy.optimize.leastsq : Minimize the sum of squares of a set of equations. Notes ----- Missing values are considered pair-wise: if a value is missing in `x`, the corresponding value in `y` is masked. For compatibility with older versions of SciPy, the return value acts like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``, ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write:: slope, intercept, r, p, se = linregress(x, y) With that style, however, the standard error of the intercept is not available. To have access to all the computed values, including the standard error of the intercept, use the return value as an object with attributes, e.g.:: result = linregress(x, y) print(result.intercept, result.intercept_stderr) Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> rng = np.random.default_rng() Generate some data: >>> x = rng.random(10) >>> y = 1.6*x + rng.random(10) Perform the linear regression: >>> res = stats.linregress(x, y) Coefficient of determination (R-squared): >>> print(f""R-squared: {res.rvalue**2:.6f}"") R-squared: 0.717533 Plot the data along with the fitted line: >>> plt.plot(x, y, 'o', label='original data') >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line') >>> plt.legend() >>> plt.show() Calculate 95% confidence interval on slope and intercept: >>> # Two-sided inverse Students t-distribution >>> # p - probability, df - degrees of freedom >>> from scipy.stats import t >>> tinv = lambda p, df: abs(t.ppf(p/2, df)) >>> ts = tinv(0.05, len(x)-2) >>> print(f""slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}"") slope (95%): 1.453392 +/- 0.743465 >>> print(f""intercept (95%): {res.intercept:.6f}"" ... 
f"" +/- {ts*res.intercept_stderr:.6f}"") intercept (95%): 0.616950 +/- 0.544475 """""" TINY = 1.0e-20 if y is None: # x is a (2, N) or (N, 2) shaped array_like x = np.asarray(x) if x.shape[0] == 2: x, y = x elif x.shape[1] == 2: x, y = x.T else: raise ValueError(""If only `x` is given as input, it has to "" ""be of shape (2, N) or (N, 2); provided shape "" f""was {x.shape}."") else: x = np.asarray(x) y = np.asarray(y) if x.size == 0 or y.size == 0: raise ValueError(""Inputs must not be empty."") if np.amax(x) == np.amin(x) and len(x) > 1: raise ValueError(""Cannot calculate a linear regression "" ""if all x values are identical"") n = len(x) xmean = np.mean(x, None) ymean = np.mean(y, None) # Average sums of square differences from the mean # ssxm = mean( (x-mean(x))^2 ) # ssxym = mean( (x-mean(x)) * (y-mean(y)) ) ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat # R-value # r = ssxym / sqrt( ssxm * ssym ) if ssxm == 0.0 or ssym == 0.0: # If the denominator was going to be 0 r = 0.0 else: r = ssxym / np.sqrt(ssxm * ssym) # Test for numerical error propagation (make sure -1 < r < 1) if r > 1.0: r = 1.0 elif r < -1.0: r = -1.0 slope = ssxym / ssxm intercept = ymean - slope*xmean if n == 2: # handle case when only two points are passed in if y[0] == y[1]: prob = 1.0 else: prob = 0.0 slope_stderr = 0.0 intercept_stderr = 0.0 else: df = n - 2 # Number of degrees of freedom # n-2 degrees of freedom because 2 has been used up # to estimate the mean and standard deviation t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY))) t, prob = scipy.stats.stats._ttest_finish(df, t, alternative) slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df) # Also calculate the standard error of the intercept # The following relationship is used: # ssxm = mean( (x-mean(x))^2 ) # = ssx - sx*sx # = mean( x^2 ) - mean(x)^2 intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2) return LinregressResult(slope=slope, intercept=intercept, rvalue=r, pvalue=prob, stderr=slope_stderr, intercept_stderr=intercept_stderr) ","def linregress(x, y=None, alternative='two-sided'): """""" Calculate a linear least-squares regression for two sets of measurements. Parameters ---------- x, y : array_like Two sets of measurements. Both arrays should have the same length. If only `x` is given (and ``y=None``), then it must be a two-dimensional array where one dimension has length 2. The two sets of measurements are then found by splitting the array along the length-2 dimension. In the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is equivalent to ``linregress(x[0], x[1])``. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. Default is 'two-sided'. The following options are available: * 'two-sided': the slope of the regression line is nonzero * 'less': the slope of the regression line is less than zero * 'greater': the slope of the regression line is greater than zero .. versionadded:: 1.7.0 Returns ------- result : ``LinregressResult`` instance The return value is an object with the following attributes: slope : float Slope of the regression line. intercept : float Intercept of the regression line. rvalue : float The Pearson correlation coefficient. The square of ``rvalue`` is equal to the coefficient of determination. pvalue : float The p-value for a hypothesis test whose null hypothesis is that the slope is zero, using Wald Test with t-distribution of the test statistic. See `alternative` above for alternative hypotheses. 
stderr : float Standard error of the estimated slope (gradient), under the assumption of residual normality. intercept_stderr : float Standard error of the estimated intercept, under the assumption of residual normality. See Also -------- scipy.optimize.curve_fit : Use non-linear least squares to fit a function to data. scipy.optimize.leastsq : Minimize the sum of squares of a set of equations. Notes ----- Missing values are considered pair-wise: if a value is missing in `x`, the corresponding value in `y` is masked. For compatibility with older versions of SciPy, the return value acts like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``, ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write:: slope, intercept, r, p, se = linregress(x, y) With that style, however, the standard error of the intercept is not available. To have access to all the computed values, including the standard error of the intercept, use the return value as an object with attributes, e.g.:: result = linregress(x, y) print(result.intercept, result.intercept_stderr) Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> rng = np.random.default_rng() Generate some data: >>> x = rng.random(10) >>> y = 1.6*x + rng.random(10) Perform the linear regression: >>> res = stats.linregress(x, y) Coefficient of determination (R-squared): >>> print(f""R-squared: {res.rvalue**2:.6f}"") R-squared: 0.717533 Plot the data along with the fitted line: >>> plt.plot(x, y, 'o', label='original data') >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line') >>> plt.legend() >>> plt.show() Calculate 95% confidence interval on slope and intercept: >>> # Two-sided inverse Students t-distribution >>> # p - probability, df - degrees of freedom >>> from scipy.stats import t >>> tinv = lambda p, df: abs(t.ppf(p/2, df)) >>> ts = tinv(0.05, len(x)-2) >>> print(f""slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}"") slope (95%): 1.453392 +/- 0.743465 >>> print(f""intercept (95%): {res.intercept:.6f}"" ... 
f"" +/- {ts*res.intercept_stderr:.6f}"") intercept (95%): 0.616950 +/- 0.544475 """""" TINY = 1.0e-20 if y is None: # x is a (2, N) or (N, 2) shaped array_like x = np.asarray(x) if x.shape[0] == 2: x, y = x elif x.shape[1] == 2: x, y = x.T else: raise ValueError(""If only `x` is given as input, it has to "" ""be of shape (2, N) or (N, 2); provided shape "" f""was {x.shape}."") else: x = np.asarray(x) y = np.asarray(y) if x.size == 0 or y.size == 0: raise ValueError(""Inputs must not be empty."") if np.amax(x) == np.amin(x): raise ValueError(""Cannot calculate a linear regression "" ""if all x values are identical"") n = len(x) xmean = np.mean(x, None) ymean = np.mean(y, None) # Average sums of square differences from the mean # ssxm = mean( (x-mean(x))^2 ) # ssxym = mean( (x-mean(x)) * (y-mean(y)) ) ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat # R-value # r = ssxym / sqrt( ssxm * ssym ) if ssxm == 0.0 or ssym == 0.0: # If the denominator was going to be 0 r = 0.0 else: r = ssxym / np.sqrt(ssxm * ssym) # Test for numerical error propagation (make sure -1 < r < 1) if r > 1.0: r = 1.0 elif r < -1.0: r = -1.0 slope = ssxym / ssxm intercept = ymean - slope*xmean if n == 2: # handle case when only two points are passed in if y[0] == y[1]: prob = 1.0 else: prob = 0.0 slope_stderr = 0.0 intercept_stderr = 0.0 else: df = n - 2 # Number of degrees of freedom # n-2 degrees of freedom because 2 has been used up # to estimate the mean and standard deviation t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY))) t, prob = scipy.stats.stats._ttest_finish(df, t, alternative) slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df) # Also calculate the standard error of the intercept # The following relationship is used: # ssxm = mean( (x-mean(x))^2 ) # = ssx - sx*sx # = mean( x^2 ) - mean(x)^2 intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2) return LinregressResult(slope=slope, intercept=intercept, rvalue=r, pvalue=prob, stderr=slope_stderr, intercept_stderr=intercept_stderr) " 40967,"def migrate_footer_to_static_placeholder(apps, schema_editor): """""" Create a footer with the new static placeholder from the existing footer pages that were placed under the ""annex"" page and displayed in the footer via a `show_menu_below_id` template tag. """""" Page = apps.get_model(""cms"", ""Page"") Title = apps.get_model(""cms"", ""Title"") # We should import StaticPlaceholder from apps but its `draft` and `public` fields # are custom foreign key field that checks that they are targeting an instance of # # cms.models Placeholder so the code would not work. We can safely assume that the # Placeholder and StaticPlaceholder models is still there when this migration is run static_placeholder, was_created = StaticPlaceholder.objects.get_or_create( code=""footer"" ) if not was_created: # If the static placeholder was already existing, it means this migration is being # replayed and we better do nothing return for is_draft in [False, True]: # Look for an existing footer page try: footer_page = Page.objects.get( reverse_id=""annex"", publisher_is_draft=is_draft ) except Page.DoesNotExist: return placeholder = ( static_placeholder.draft if is_draft else static_placeholder.public ) for language in Title.objects.filter(page=footer_page).values_list( ""language"", flat=True ): # Create the
<ul>
    section to carry the list of links section = add_plugin( placeholder, plugin_type=""SectionPlugin"", language=language, template=""richie/section/section_list.html"", ) # Create a
  • link for each page in the exiting footer menu for page in Page.objects.filter( node__parent=footer_page.node, in_navigation=True, title_set__language=language, publisher_is_draft=is_draft, ): title = page.title_set.get(language=language) add_plugin( placeholder, plugin_type=""LinkPlugin"", language=language, internal_link_id=page.id, name=title.title, target=section, ) ","def migrate_footer_to_static_placeholder(apps, schema_editor): """""" Create a footer with the new static placeholder from the existing footer pages that were placed under the ""annex"" page and displayed in the footer via a `show_menu_below_id` template tag. """""" Page = apps.get_model(""cms"", ""Page"") Title = apps.get_model(""cms"", ""Title"") # We should import StaticPlaceholder from apps but its `draft` and `public` fields # are custom foreign key field that checks that they are targeting an instance of # # cms.models Placeholder so the code would not work. We can safely assume that the # Placeholder and StaticPlaceholder models is still there when this migration is run static_placeholder, was_created = StaticPlaceholder.objects.get_or_create( code=""footer"" ) if not was_created: # If the static placeholder was already existing, it means this migration is being # replayed and we better do nothing. return for is_draft in [False, True]: # Look for an existing footer page try: footer_page = Page.objects.get( reverse_id=""annex"", publisher_is_draft=is_draft ) except Page.DoesNotExist: return placeholder = ( static_placeholder.draft if is_draft else static_placeholder.public ) for language in Title.objects.filter(page=footer_page).values_list( ""language"", flat=True ): # Create the
      section to carry the list of links section = add_plugin( placeholder, plugin_type=""SectionPlugin"", language=language, template=""richie/section/section_list.html"", ) # Create a
    • link for each page in the exiting footer menu for page in Page.objects.filter( node__parent=footer_page.node, in_navigation=True, title_set__language=language, publisher_is_draft=is_draft, ): title = page.title_set.get(language=language) add_plugin( placeholder, plugin_type=""LinkPlugin"", language=language, internal_link_id=page.id, name=title.title, target=section, ) " 26748,"def upgrade_check(args): if args.save: filename = args.save if not filename.lower().endswith("".json""): print(""Only JSON files are supported"", file=sys.stderr) formatter = JSONFormatter(args.save) else: formatter = ConsoleFormatter() all_problems = check_upgrade(formatter) if all_problems: exit(1) ","def upgrade_check(args): if args.save: filename = args.save if not filename.lower().endswith("".json""): print(""Only JSON files are supported"", file=sys.stderr) formatter = JSONFormatter(args.save) else: formatter = ConsoleFormatter() all_problems = check_upgrade(formatter) if all_problems: sys.exit(1) " 54822,"def sample_tmsv( r: list, t: float, Ul: np.ndarray, w: np.ndarray, n_samples: int, loss: float = 0.0, ) -> list: r""""""Generate samples for simulating vibrational quantum dynamics with a two-mode squeezed vacuum input state. This function generates samples from a GBS device with two-mode squeezed vacuum input states. Given :math:`N` squeezing parameters and an :math:`N`-dimensional normal-to-local transformation matrix, a GBS device with :math:`2N` modes is simulated. The TimeEvolution operator acts on only the first :math:`N` modes in the device. Samples are generated by measuring the number of photons in each of the :math:`2N` modes. **Example usage:** >>> r = [[0.2, 0.1], [0.8, 0.2]] >>> t = 10.0 >>> Ul = np.array([[0.707106781, -0.707106781], >>> [0.707106781, 0.707106781]]) >>> w = np.array([3914.92, 3787.59]) >>> n_samples = 5 >>> sample_tmsv(r, t, Ul, w, n_samples) [[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 2, 0, 2]] Args: r (list[list[float]]): list of two-mode squeezing gate parameters given as ``[amplitude, phase]`` for all modes t (float): time in femtoseconds Ul (array): normal-to-local transformation matrix w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` n_samples (int): number of samples to be generated loss (float): loss parameter denoting the fraction of lost photons Returns: list[list[int]]: a list of samples """""" if np.any(np.iscomplex(Ul)): raise ValueError(""The normal mode to local mode transformation matrix must be real"") if n_samples < 1: raise ValueError(""Number of samples must be at least one"") if not len(r) == len(Ul): raise ValueError( ""Number of squeezing parameters and the number of modes in the normal-to-local"" "" transformation matrix must be equal"" ) N = len(Ul) eng = sf.LocalEngine(backend=""gaussian"") prog = sf.Program(2 * N) # pylint: disable=expression-not-assigned with prog.context as q: for i in range(N): sf.ops.S2gate(r[i][0], r[i][1]) | (q[i], q[i + N]) sf.ops.Interferometer(Ul.T) | q[:N] TimeEvolution(t, w) | q[:N] sf.ops.Interferometer(Ul) | q[:N] if loss: for _q in q: sf.ops.LossChannel(1 - loss) | _q sf.ops.MeasureFock() | q with warnings.catch_warnings(): warnings.filterwarnings(""ignore"", category=UserWarning, message=""Cannot simulate non-"") s = eng.run(prog, shots=n_samples).samples return s.tolist() ","def sample_tmsv( r: list, t: float, Ul: np.ndarray, w: np.ndarray, n_samples: int, loss: float = 0.0, ) -> list: r""""""Generate samples for simulating vibrational quantum dynamics with a 
two-mode squeezed vacuum input state. This function generates samples from a GBS device with two-mode squeezed vacuum input states. Given :math:`N` squeezing parameters and an :math:`N`-dimensional normal-to-local transformation matrix, a GBS device with :math:`2N` modes is simulated. The :func:`~.TimeEvolution` operator acts on only the first :math:`N` modes in the device. Samples are generated by measuring the number of photons in each of the :math:`2N` modes. **Example usage:** >>> r = [[0.2, 0.1], [0.8, 0.2]] >>> t = 10.0 >>> Ul = np.array([[0.707106781, -0.707106781], >>> [0.707106781, 0.707106781]]) >>> w = np.array([3914.92, 3787.59]) >>> n_samples = 5 >>> sample_tmsv(r, t, Ul, w, n_samples) [[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 2, 0, 2]] Args: r (list[list[float]]): list of two-mode squeezing gate parameters given as ``[amplitude, phase]`` for all modes t (float): time in femtoseconds Ul (array): normal-to-local transformation matrix w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` n_samples (int): number of samples to be generated loss (float): loss parameter denoting the fraction of lost photons Returns: list[list[int]]: a list of samples """""" if np.any(np.iscomplex(Ul)): raise ValueError(""The normal mode to local mode transformation matrix must be real"") if n_samples < 1: raise ValueError(""Number of samples must be at least one"") if not len(r) == len(Ul): raise ValueError( ""Number of squeezing parameters and the number of modes in the normal-to-local"" "" transformation matrix must be equal"" ) N = len(Ul) eng = sf.LocalEngine(backend=""gaussian"") prog = sf.Program(2 * N) # pylint: disable=expression-not-assigned with prog.context as q: for i in range(N): sf.ops.S2gate(r[i][0], r[i][1]) | (q[i], q[i + N]) sf.ops.Interferometer(Ul.T) | q[:N] TimeEvolution(t, w) | q[:N] sf.ops.Interferometer(Ul) | q[:N] if loss: for _q in q: sf.ops.LossChannel(1 - loss) | _q sf.ops.MeasureFock() | q with warnings.catch_warnings(): warnings.filterwarnings(""ignore"", category=UserWarning, message=""Cannot simulate non-"") s = eng.run(prog, shots=n_samples).samples return s.tolist() " 4155,"def parse_command_line(args): from .Main import CompilationOptions, default_options pending_arg = [] def pop_arg(): if not args or pending_arg: bad_usage() if '=' in args[0] and args[0].startswith('--'): # allow ""--long-option=xyz"" name, value = args.pop(0).split('=', 1) pending_arg.append(value) return name return args.pop(0) def pop_value(default=None): if pending_arg: return pending_arg.pop() elif default is not None: return default elif not args: bad_usage() return args.pop(0) def get_param(option): tail = option[2:] if tail: return tail else: return pop_arg() options = CompilationOptions(default_options) sources = [] while args: if args[0].startswith(""-""): option = pop_arg() if option in (""-V"", ""--version""): options.show_version = 1 elif option in (""-l"", ""--create-listing""): options.use_listing_file = 1 elif option in (""-+"", ""--cplus""): options.cplus = 1 elif option == ""--embed"": Options.embed = pop_value(""main"") elif option.startswith(""-I""): options.include_path.append(get_param(option)) elif option == ""--include-dir"": options.include_path.append(pop_value()) elif option in (""-w"", ""--working""): options.working_path = pop_value() elif option in (""-o"", ""--output-file""): options.output_file = pop_value() elif option in (""-t"", ""--timestamps""): options.timestamps = 1 elif option in (""-f"", ""--force""): 
options.timestamps = 0 elif option in (""-v"", ""--verbose""): options.verbose += 1 elif option in (""-p"", ""--embed-positions""): Options.embed_pos_in_docstring = 1 elif option in (""-z"", ""--pre-import""): Options.pre_import = pop_value() elif option == ""--cleanup"": Options.generate_cleanup_code = int(pop_value()) elif option in (""-D"", ""--no-docstrings""): Options.docstrings = False elif option in (""-a"", ""--annotate""): Options.annotate = True elif option == ""--annotate-coverage"": Options.annotate = True Options.annotate_coverage_xml = pop_value() elif option == ""--convert-range"": Options.convert_range = True elif option == ""--line-directives"": options.emit_linenums = True elif option == ""--no-c-in-traceback"": options.c_line_in_traceback = False elif option == ""--gdb"": options.gdb_debug = True options.output_dir = os.curdir elif option == ""--gdb-outdir"": options.gdb_debug = True options.output_dir = pop_value() elif option == ""--lenient"": Options.error_on_unknown_names = False Options.error_on_uninitialized = False elif option == '-2': options.language_level = 2 elif option == '-3': options.language_level = 3 elif option == '--3str': options.language_level = '3str' elif option == ""--capi-reexport-cincludes"": options.capi_reexport_cincludes = True elif option == ""--fast-fail"": Options.fast_fail = True elif option == ""--cimport-from-pyx"": Options.cimport_from_pyx = True elif option in ('-Werror', '--warning-errors'): Options.warning_errors = True elif option in ('-Wextra', '--warning-extra'): options.compiler_directives.update(Options.extra_warnings) elif option == ""--old-style-globals"": Options.old_style_globals = True elif option == ""--directive"" or option.startswith('-X'): if option.startswith('-X') and option[2:].strip(): x_args = option[2:] else: x_args = pop_value() try: options.compiler_directives = Options.parse_directive_list( x_args, relaxed_bool=True, current_settings=options.compiler_directives) except ValueError as e: sys.stderr.write(""Error in compiler directive: %s\n"" % e.args[0]) sys.exit(1) elif option == ""--compile-time-env"" or option.startswith('-E'): if option.startswith('-E') and option[2:].strip(): x_args = option[2:] else: x_args = pop_value() try: options.compile_time_env = Options.parse_compile_time_env( x_args, current_settings=options.compile_time_env) except ValueError as e: sys.stderr.write(""Error in compile-time-env: %s\n"" % e.args[0]) sys.exit(1) elif option == ""--module-name"": options.module_name = pop_value() elif option.startswith('--debug'): option = option[2:].replace('-', '_') from . 
import DebugFlags if option in dir(DebugFlags): setattr(DebugFlags, option, True) else: sys.stderr.write(""Unknown debug flag: %s\n"" % option) bad_usage() elif option in ('-h', '--help'): sys.stdout.write(usage) sys.exit(0) else: sys.stderr.write(""Unknown compiler flag: %s\n"" % option) sys.exit(1) else: sources.append(pop_arg()) if pending_arg: bad_usage() if options.use_listing_file and len(sources) > 1: sys.stderr.write( ""cython: Only one source file allowed when using -o\n"") sys.exit(1) if len(sources) == 0 and not options.show_version: bad_usage() if options.embed and len(sources) > 1: sys.stderr.write( ""cython: Only one source file allowed when using --embed\n"") sys.exit(1) if options.module_name: if options.timestamps: sys.stderr.write( ""cython: Cannot use --module-name with --timestamps\n"") sys.exit(1) if len(sources) > 1: sys.stderr.write( ""cython: Only one source file allowed when using --module-name\n"") sys.exit(1) parser.error("""") return options, sources ","def parse_command_line(args): from .Main import CompilationOptions, default_options pending_arg = [] def pop_arg(): if not args or pending_arg: bad_usage() if '=' in args[0] and args[0].startswith('--'): # allow ""--long-option=xyz"" name, value = args.pop(0).split('=', 1) pending_arg.append(value) return name return args.pop(0) def pop_value(default=None): if pending_arg: return pending_arg.pop() elif default is not None: return default elif not args: bad_usage() return args.pop(0) def get_param(option): tail = option[2:] if tail: return tail else: return pop_arg() options = CompilationOptions(default_options) sources = [] while args: if args[0].startswith(""-""): option = pop_arg() if option in (""-V"", ""--version""): options.show_version = 1 elif option in (""-l"", ""--create-listing""): options.use_listing_file = 1 elif option in (""-+"", ""--cplus""): options.cplus = 1 elif option == ""--embed"": Options.embed = pop_value(""main"") elif option.startswith(""-I""): options.include_path.append(get_param(option)) elif option == ""--include-dir"": options.include_path.append(pop_value()) elif option in (""-w"", ""--working""): options.working_path = pop_value() elif option in (""-o"", ""--output-file""): options.output_file = pop_value() elif option in (""-t"", ""--timestamps""): options.timestamps = 1 elif option in (""-f"", ""--force""): options.timestamps = 0 elif option in (""-v"", ""--verbose""): options.verbose += 1 elif option in (""-p"", ""--embed-positions""): Options.embed_pos_in_docstring = 1 elif option in (""-z"", ""--pre-import""): Options.pre_import = pop_value() elif option == ""--cleanup"": Options.generate_cleanup_code = int(pop_value()) elif option in (""-D"", ""--no-docstrings""): Options.docstrings = False elif option in (""-a"", ""--annotate""): Options.annotate = True elif option == ""--annotate-coverage"": Options.annotate = True Options.annotate_coverage_xml = pop_value() elif option == ""--convert-range"": Options.convert_range = True elif option == ""--line-directives"": options.emit_linenums = True elif option == ""--no-c-in-traceback"": options.c_line_in_traceback = False elif option == ""--gdb"": options.gdb_debug = True options.output_dir = os.curdir elif option == ""--gdb-outdir"": options.gdb_debug = True options.output_dir = pop_value() elif option == ""--lenient"": Options.error_on_unknown_names = False Options.error_on_uninitialized = False elif option == '-2': options.language_level = 2 elif option == '-3': options.language_level = 3 elif option == '--3str': 
options.language_level = '3str' elif option == ""--capi-reexport-cincludes"": options.capi_reexport_cincludes = True elif option == ""--fast-fail"": Options.fast_fail = True elif option == ""--cimport-from-pyx"": Options.cimport_from_pyx = True elif option in ('-Werror', '--warning-errors'): Options.warning_errors = True elif option in ('-Wextra', '--warning-extra'): options.compiler_directives.update(Options.extra_warnings) elif option == ""--old-style-globals"": Options.old_style_globals = True elif option == ""--directive"" or option.startswith('-X'): if option.startswith('-X') and option[2:].strip(): x_args = option[2:] else: x_args = pop_value() try: options.compiler_directives = Options.parse_directive_list( x_args, relaxed_bool=True, current_settings=options.compiler_directives) except ValueError as e: sys.stderr.write(""Error in compiler directive: %s\n"" % e.args[0]) sys.exit(1) elif option == ""--compile-time-env"" or option.startswith('-E'): if option.startswith('-E') and option[2:].strip(): x_args = option[2:] else: x_args = pop_value() try: options.compile_time_env = Options.parse_compile_time_env( x_args, current_settings=options.compile_time_env) except ValueError as e: sys.stderr.write(""Error in compile-time-env: %s\n"" % e.args[0]) sys.exit(1) elif option == ""--module-name"": options.module_name = pop_value() elif option.startswith('--debug'): option = option[2:].replace('-', '_') from . import DebugFlags if option in dir(DebugFlags): setattr(DebugFlags, option, True) else: sys.stderr.write(""Unknown debug flag: %s\n"" % option) bad_usage() elif option in ('-h', '--help'): sys.stdout.write(usage) sys.exit(0) else: sys.stderr.write(""Unknown compiler flag: %s\n"" % option) sys.exit(1) else: sources.append(pop_arg()) if pending_arg: bad_usage() if options.use_listing_file and len(sources) > 1: sys.stderr.write( ""cython: Only one source file allowed when using -o\n"") sys.exit(1) if len(sources) == 0 and not options.show_version: bad_usage() if Options.embed and len(sources) > 1: sys.stderr.write( ""cython: Only one source file allowed when using --embed\n"") sys.exit(1) if options.module_name: if options.timestamps: sys.stderr.write( ""cython: Cannot use --module-name with --timestamps\n"") sys.exit(1) if len(sources) > 1: sys.stderr.write( ""cython: Only one source file allowed when using --module-name\n"") sys.exit(1) parser.error("""") return options, sources " 53470,"def iterator_prefix(iterator, stop: int): for i, item in enumerate(iterator): if i == stop: # [no-else-break] break else: yield item ","def next_seven_elements(iterator): for i, item in enumerate(iterator): if i == 7: # [no-else-break] break else: yield item " 1770,"def _encode(values, uniques=None, encode=False, check_unknown=True): """"""Helper function to factorize (find uniques) and encode values. Uses pure python method for object dtype, and numpy method for all other dtypes. The numpy method has the limitation that the `uniques` need to be sorted. Importantly, this is not checked but assumed to already be the case. The calling method needs to ensure this for all non-object values. Parameters ---------- values : array Values to factorize or encode. uniques : array, optional If passed, uniques are not determined from passed values (this can be because the user specified categories, or because they already have been determined in fit). encode : bool, default False If True, also encode the values into integer codes based on `uniques`. 
check_unknown : bool, default True If True, check for values in ``values`` that are not in ``unique`` and raise an error. This is ignored for object dtype, and treated as True in this case. This parameter is useful for _BaseEncoder._transform() to avoid calling _encode_check_unknown() twice. Returns ------- uniques If ``encode=False``. The unique values are sorted if the `uniques` parameter was None (and thus inferred from the data). (uniques, encoded) If ``encode=True``. """""" if values.dtype == object: try: res = _encode_python(values, uniques, encode) except TypeError: raise TypeError(""argument must be a string or number"") except ValueError as e: raise e return res else: return _encode_numpy(values, uniques, encode, check_unknown=check_unknown) ","def _encode(values, uniques=None, encode=False, check_unknown=True): """"""Helper function to factorize (find uniques) and encode values. Uses pure python method for object dtype, and numpy method for all other dtypes. The numpy method has the limitation that the `uniques` need to be sorted. Importantly, this is not checked but assumed to already be the case. The calling method needs to ensure this for all non-object values. Parameters ---------- values : array Values to factorize or encode. uniques : array, optional If passed, uniques are not determined from passed values (this can be because the user specified categories, or because they already have been determined in fit). encode : bool, default False If True, also encode the values into integer codes based on `uniques`. check_unknown : bool, default True If True, check for values in ``values`` that are not in ``unique`` and raise an error. This is ignored for object dtype, and treated as True in this case. This parameter is useful for _BaseEncoder._transform() to avoid calling _encode_check_unknown() twice. Returns ------- uniques If ``encode=False``. The unique values are sorted if the `uniques` parameter was None (and thus inferred from the data). (uniques, encoded) If ``encode=True``. """""" if values.dtype == object: try: res = _encode_python(values, uniques, encode) except TypeError: raise TypeError(""argument must be a string or number"") except ValueError as e: raise return res else: return _encode_numpy(values, uniques, encode, check_unknown=check_unknown) " 43648,"def xy_mixer(graph): r""""""""Creates the generalized SWAP/XY mixer outlined in `this paper `__, defined as: .. math:: H_M \ = \ \frac{1}{2} \displaystyle\sum_{(i, j) \in E(G)} X_i X_j \ + \ Y_i Y_j, for some graph :math:`G`. :math:`X_i` and :math:`Y_i` denote the Pauli-X and Pauli-Y on the :math:`i`-th qubit respectively. Args: graph (Iterable or networkx.Graph) A graph defining the pairs of wires on which each term of the Hamiltonian acts. """""" ############## # Input checks if isinstance(graph, networkx.Graph): graph = graph.edges elif isinstance(graph, Iterable): check_iterable_graph(graph) else: raise ValueError( ""Inputted graph must be a networkx.Graph object or Iterable, got {}"".format( type(graph).__name__ ) ) ############## coeffs = 2 * [0.5 for i in graph] obs = [] for e in graph: obs.append(qml.PauliX(Wires(e[0])) @ qml.PauliX(Wires(e[1]))) obs.append(qml.PauliY(Wires(e[0])) @ qml.PauliY(Wires(e[1]))) return qml.Hamiltonian(coeffs, obs) ","def xy_mixer(graph): r""""""""Creates the generalized SWAP/XY mixer outlined in `this paper `__, defined as: .. math:: H_M \ = \ \frac{1}{2} \displaystyle\sum_{(i, j) \in E(G)} X_i X_j \ + \ Y_i Y_j, for some graph :math:`G`. 
:math:`X_i` and :math:`Y_i` denote the Pauli-X and Pauli-Y operators on the :math:`i`-th qubit respectively. Args: graph (Iterable or networkx.Graph) A graph defining the pairs of wires on which each term of the Hamiltonian acts. """""" ############## # Input checks if isinstance(graph, networkx.Graph): graph = graph.edges elif isinstance(graph, Iterable): check_iterable_graph(graph) else: raise ValueError( ""Inputted graph must be a networkx.Graph object or Iterable, got {}"".format( type(graph).__name__ ) ) ############## coeffs = 2 * [0.5 for i in graph] obs = [] for e in graph: obs.append(qml.PauliX(Wires(e[0])) @ qml.PauliX(Wires(e[1]))) obs.append(qml.PauliY(Wires(e[0])) @ qml.PauliY(Wires(e[1]))) return qml.Hamiltonian(coeffs, obs) " 46151,"def ds_as_cds(dataset): """""" Converts Vega dataset into Bokeh ColumnDataSource data """""" if len(dataset) == 0: return {} # create a list of unique keys from all items as some items may not include optional fields keys = list(set(k for d in dataset for k in d.keys())) keys.sort() data = {k: [] for k in keys} for item in dataset: for k in keys: data[k].append(item.get(v)) data = {k: np.asarray(v) for k, v in data.items()} return data ","def ds_as_cds(dataset): """""" Converts Vega dataset into Bokeh ColumnDataSource data """""" if len(dataset) == 0: return {} # create a list of unique keys from all items as some items may not include optional fields keys = sorted(set(k for d in dataset for k in d.keys())) data = {k: [] for k in keys} for item in dataset: for k in keys: data[k].append(item.get(v)) data = {k: np.asarray(v) for k, v in data.items()} return data " 30361,"def generate_ip_queries(ips): ips = [ip for ip in ips if is_ip_valid(ip)] if not ips: return {} queries = {} # Cortex traps IP ip_fields = [""endPointHeader.agentIp='{}'"".format(ip) for ip in ips] query_cortex_traps_ip = ' OR '.join(ip_fields) queries['CortexTrapsIP'] = 'SELECT * from tms.threat where {}'.format(query_cortex_traps_ip) # Cortex Analytics IP ip_fields = [""endPointHeader.agentIp='{}'"".format(ip) for ip in ips] query_cortex_analytics_ip = ' OR '.join(ip_fields) queries['CortexAnalyticsIP'] = 'SELECT * from tms.analytics where {}'.format(query_cortex_analytics_ip) # Cortex Traffic IP ip_fields = [""src='{0}' OR dst='{0}'"".format(ip) for ip in ips] query_cortex_traffic_ip = ' OR '.join(ip_fields) queries['CortexTrafficIP'] = 'SELECT * from panw.traffic where {}'.format(query_cortex_traffic_ip) # Cortex Threat IP ip_fields = [""src='{0}' OR dst='{0}'"".format(ip) for ip in ips] query_cortex_threat_ip = ' OR '.join(ip_fields) queries['CortexThreatIP'] = 'SELECT * from panw.threat where {}'.format(query_cortex_threat_ip) # Autofocus Sessions IP children = [{ 'field': 'alias.ip_address', 'operator': 'contains', 'value': ip } for ip in ips] query_autofocus_sessions_ip = { 'operator': 'any', 'children': children } queries['AutofocusSessionsIP'] = json.dumps(query_autofocus_sessions_ip) # Panorama IP ip_fields = [""( addr.src in {0} ) or ( addr.dst in {0} )"".format(ip) for ip in ips] query_panorama_ip = ' or '.join(ip_fields) queries['PanoramaIP'] = query_panorama_ip return queries ","def generate_ip_queries(ips): ips = [ip for ip in ips if is_ip_valid(ip)] if not ips: return {} queries = {} # Cortex traps IP ip_fields = [""endPointHeader.agentIp='{}'"".format(ip) for ip in ips] query_cortex_traps_ip = ' OR '.join(ip_fields) queries['CortexTrapsIP'] = f'SELECT * from tms.threat where {query_cortex_traps_ip}' # guardrails-disable-line # Cortex Analytics IP ip_fields = 
[""endPointHeader.agentIp='{}'"".format(ip) for ip in ips] query_cortex_analytics_ip = ' OR '.join(ip_fields) queries['CortexAnalyticsIP'] = 'SELECT * from tms.analytics where {}'.format(query_cortex_analytics_ip) # Cortex Traffic IP ip_fields = [""src='{0}' OR dst='{0}'"".format(ip) for ip in ips] query_cortex_traffic_ip = ' OR '.join(ip_fields) queries['CortexTrafficIP'] = 'SELECT * from panw.traffic where {}'.format(query_cortex_traffic_ip) # Cortex Threat IP ip_fields = [""src='{0}' OR dst='{0}'"".format(ip) for ip in ips] query_cortex_threat_ip = ' OR '.join(ip_fields) queries['CortexThreatIP'] = 'SELECT * from panw.threat where {}'.format(query_cortex_threat_ip) # Autofocus Sessions IP children = [{ 'field': 'alias.ip_address', 'operator': 'contains', 'value': ip } for ip in ips] query_autofocus_sessions_ip = { 'operator': 'any', 'children': children } queries['AutofocusSessionsIP'] = json.dumps(query_autofocus_sessions_ip) # Panorama IP ip_fields = [""( addr.src in {0} ) or ( addr.dst in {0} )"".format(ip) for ip in ips] query_panorama_ip = ' or '.join(ip_fields) queries['PanoramaIP'] = query_panorama_ip return queries " 31361,"def categories_command(client: Client) -> CommandResults: """"""Get categories list from TOPdesk"""""" categories = client.get_list(""/incidents/categories"") return command_with_all_fields_readable_list(results=categories, result_name='categories', output_prefix='category', outputs_key_field='id') ","def categories_command(client: Client) -> CommandResults: """"""Get categories list from TOPdesk"""""" categories = client.get_list(""/incidents/categories"") return command_with_all_fields_readable_list(results=categories, result_name='categories', output_prefix='Category', outputs_key_field='id') " 8142,"def get_rectangle_coordinates(bottom_left, *, top_right = None, width: u.deg = None, height: u.deg = None): if width is None and height is None: if not top_right: if bottom_left.shape[0] != 2: raise IndexError(""If top_right is not specified bottom_left must have length two or width and height must be provided."") else: top_right = bottom_left[1] bottom_left = bottom_left[0] if type(bottom_left) is not type(top_right): raise ValueError(""top_right must be of same type as bottom_left"") if isinstance(bottom_left, (SkyCoord, BaseCoordinateFrame)): top_right = top_right.transform_to(bottom_left.frame.name) width = (top_right.spherical.lon - bottom_left.spherical.lon).to(u.deg) # Getting the difference in Longitudes. height = (top_right.spherical.lat - bottom_left.spherical.lat).to(u.deg) # Getting the difference in Latitudes. 
if width <= 0*u.deg or height <= 0*u.deg: raise ValueError(""top_right must be to the right and above bottom_left."") else: raise ValueError(""Invalid value passed for bottom_left"") elif top_right is None: top_right = SkyCoord(bottom_left.data.lon + width, bottom_left.data.lat + height, frame=bottom_left.frame.name) else: raise ValueError(""Invalid input, bottom_left and top_right must either be SkyCoord"") return (bottom_left, top_right) ","def get_rectangle_coordinates(bottom_left, *, top_right = None, width: u.deg = None, height: u.deg = None): if width is None and height is None: if not top_right: if bottom_left.shape[0] != 2: raise IndexError(""If top_right is not specified bottom_left must have length two or width and height must be provided."") else: top_right = bottom_left[1] bottom_left = bottom_left[0] if type(bottom_left) is not type(top_right): raise ValueError(""top_right must be of same type as bottom_left"") if isinstance(bottom_left, (SkyCoord, BaseCoordinateFrame)): top_right = top_right.transform_to(bottom_left.frame) width = (top_right.spherical.lon - bottom_left.spherical.lon).to(u.deg) # Getting the difference in Longitudes. height = (top_right.spherical.lat - bottom_left.spherical.lat).to(u.deg) # Getting the difference in Latitudes. if width <= 0*u.deg or height <= 0*u.deg: raise ValueError(""top_right must be to the right and above bottom_left."") else: raise ValueError(""Invalid value passed for bottom_left"") elif top_right is None: top_right = SkyCoord(bottom_left.data.lon + width, bottom_left.data.lat + height, frame=bottom_left.frame.name) else: raise ValueError(""Invalid input, bottom_left and top_right must either be SkyCoord"") return (bottom_left, top_right) " 24857,"def my_func(self): """"""This is a docstring. Returns ------- bool Always False """""" return False ","def my_func(self): """"""find_numpy_returns Returns ------- bool Always False """""" return False " 26693,"def get_credentials_and_project_id( gcp_key_path: Optional[str] = None, gcp_scopes: Optional[str] = None ) -> google.auth.credentials.Credentials: """""" Returns the Credentials object for Google API and the associated project_id It will get the credentials from .json file if `gcp_key_path` is provided. Otherwise, return default credentials for the current environment :param gcp_key_path: Path to GCP Credential JSON file :type gcp_key_path: str :param gcp_scopes: Comma-separated string containing GCP scopes :type gcp_scopes: str :return: Google Auth Credentials :type: google.auth.credentials.Credentials """""" scopes = [s.strip() for s in gcp_scopes.split(',')] \ if gcp_scopes else DEFAULT_SCOPES if gcp_key_path: # Get credentials from a JSON file. if gcp_key_path.endswith('.json'): credentials = ( google.oauth2.service_account.Credentials.from_service_account_file( filename=gcp_key_path, scopes=scopes) ) project_id = credentials.project_id elif gcp_key_path.endswith('.p12'): raise AirflowException( 'Legacy P12 key file are not supported, use a JSON key file.' ) else: raise AirflowException('Unrecognised extension for key file.') else: credentials, project_id = google.auth.default(scopes=scopes) return credentials, project_id ","def get_credentials_and_project_id( gcp_key_path: Optional[str] = None, gcp_scopes: Optional[str] = None ) -> Tuple[google.auth.credentials.Credentials, str]: """""" Returns the Credentials object for Google API and the associated project_id It will get the credentials from .json file if `gcp_key_path` is provided. 
Otherwise, return default credentials for the current environment :param gcp_key_path: Path to GCP Credential JSON file :type gcp_key_path: str :param gcp_scopes: Comma-separated string containing GCP scopes :type gcp_scopes: str :return: Google Auth Credentials :type: google.auth.credentials.Credentials """""" scopes = [s.strip() for s in gcp_scopes.split(',')] \ if gcp_scopes else DEFAULT_SCOPES if gcp_key_path: # Get credentials from a JSON file. if gcp_key_path.endswith('.json'): credentials = ( google.oauth2.service_account.Credentials.from_service_account_file( filename=gcp_key_path, scopes=scopes) ) project_id = credentials.project_id elif gcp_key_path.endswith('.p12'): raise AirflowException( 'Legacy P12 key file are not supported, use a JSON key file.' ) else: raise AirflowException('Unrecognised extension for key file.') else: credentials, project_id = google.auth.default(scopes=scopes) return credentials, project_id " 41612,"def get_truested(config: Config, repository_name: str) -> Optional[bool]: trusted = config.get(""certificates.{}.trusted"".format(repository_name)) if trusted: return bool(trusted) else: return None ","def get_trusted(config: Config, repository_name: str) -> Optional[bool]: trusted = config.get(""certificates.{}.trusted"".format(repository_name)) if trusted: return bool(trusted) else: return None " 38363,"def _parse_raw_answer_dict(d, h5grp): """""" Doc string. """""" for k, v in d.items(): if isinstance(v, dict): h5_sub_grp = h5grp.create_group(k) _parse_raw_answer_dict(v, h5_sub_grp) else: if not isinstance(k, str): k = str(k) h5grp.create_dataset(k, data=v) ","def _parse_raw_answer_dict(d, h5grp): """""" Doc string. """""" for k, v in d.items(): if isinstance(v, dict): h5_sub_grp = h5grp.create_group(k) _parse_raw_answer_dict(v, h5_sub_grp) else: k = str(k) h5grp.create_dataset(k, data=v) " 7167,"def _exclude_border(mask, footprint, exclude_border): """""" Remove peaks round the borders """""" # zero out the image borders for i in range(mask.ndim): mask = mask.swapaxes(0, i) remove = (footprint.shape[i] if footprint is not None else 2 * exclude_border) mask[:remove // 2] = mask[-remove // 2:] = False mask = mask.swapaxes(0, i) return mask ","def _exclude_border(mask, footprint, exclude_border): """""" Remove peaks round the borders. 
"""""" # zero out the image borders for i in range(mask.ndim): mask = mask.swapaxes(0, i) remove = (footprint.shape[i] if footprint is not None else 2 * exclude_border) mask[:remove // 2] = mask[-remove // 2:] = False mask = mask.swapaxes(0, i) return mask " 31757,"def update_group_command(client, args): scim = verify_and_load_scim_data(args.get('scim')) group_id = scim.get('id') group_name = scim.get('displayName') if not group_id: return_error(""You must supply 'id' in the scim data"") member_ids_to_add = args.get('memberIdsToAdd') member_ids_to_delete = args.get('memberIdsToDelete') if member_ids_to_add: if type(member_ids_to_add) != list: member_ids_to_add = json.loads(member_ids_to_add) for member_id in member_ids_to_add: operation = { ""op"": ""add"", ""path"": ""members"", ""value"": [{""value"": member_id}] } group_input = {'schemas': [patchSchema], 'Operations': [operation]} res = client.update_group(group_id, group_input) if res.status_code != 204: res_json = res.json() generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id, errorCode=res_json.get('code'), errorMessage=res_json.get('message'), details=res_json) readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True) return CommandResults( raw_response=generic_iam_context.data, outputs_prefix=generic_iam_context.command, outputs_key_field='id', outputs=generic_iam_context.data, readable_output=readable_output ) if member_ids_to_delete: if type(member_ids_to_delete) is not list: member_ids_to_delete = json.loads(member_ids_to_delete) for member_id in member_ids_to_delete: operation = { ""op"": ""remove"", ""path"": ""members"", ""value"": [{""value"": member_id}] } group_input = {'schemas': [patchSchema], 'Operations': [operation]} res = client.update_group(group_id, group_input) if res.status_code != 204: res_json = res.json() generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id, errorCode=res_json.get('code'), errorMessage=res_json.get('message'), details=res_json) readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True) return CommandResults( raw_response=generic_iam_context.data, outputs_prefix=generic_iam_context.command, outputs_key_field='id', outputs=generic_iam_context.data, readable_output=readable_output ) if res.status_code == 204: res_json = res.headers generic_iam_context = OutputContext(success=True, iden=group_id, displayName=group_name, details=str(res_json)) elif res.status_code == 404: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name, errorCode=404, errorMessage=""Group/User Not Found or User not a member of group"", details=res_json) else: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name, errorCode=res_json.get('code'), errorMessage=res_json.get('message'), details=res_json) readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True) return CommandResults( raw_response=generic_iam_context.data, outputs_prefix=generic_iam_context.command, outputs_key_field='id', outputs=generic_iam_context.data, readable_output=readable_output ) ","def update_group_command(client, args): scim = verify_and_load_scim_data(args.get('scim')) group_id = scim.get('id') group_name = scim.get('displayName') if not group_id: return_error(""You must supply 'id' in the scim data"") member_ids_to_add = args.get('memberIdsToAdd') 
member_ids_to_delete = args.get('memberIdsToDelete') if member_ids_to_add: if not isinstance(member_ids_to_add, list): member_ids_to_add = json.loads(member_ids_to_add) for member_id in member_ids_to_add: operation = { ""op"": ""add"", ""path"": ""members"", ""value"": [{""value"": member_id}] } group_input = {'schemas': [patchSchema], 'Operations': [operation]} res = client.update_group(group_id, group_input) if res.status_code != 204: res_json = res.json() generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id, errorCode=res_json.get('code'), errorMessage=res_json.get('message'), details=res_json) readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True) return CommandResults( raw_response=generic_iam_context.data, outputs_prefix=generic_iam_context.command, outputs_key_field='id', outputs=generic_iam_context.data, readable_output=readable_output ) if member_ids_to_delete: if type(member_ids_to_delete) is not list: member_ids_to_delete = json.loads(member_ids_to_delete) for member_id in member_ids_to_delete: operation = { ""op"": ""remove"", ""path"": ""members"", ""value"": [{""value"": member_id}] } group_input = {'schemas': [patchSchema], 'Operations': [operation]} res = client.update_group(group_id, group_input) if res.status_code != 204: res_json = res.json() generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id, errorCode=res_json.get('code'), errorMessage=res_json.get('message'), details=res_json) readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True) return CommandResults( raw_response=generic_iam_context.data, outputs_prefix=generic_iam_context.command, outputs_key_field='id', outputs=generic_iam_context.data, readable_output=readable_output ) if res.status_code == 204: res_json = res.headers generic_iam_context = OutputContext(success=True, iden=group_id, displayName=group_name, details=str(res_json)) elif res.status_code == 404: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name, errorCode=404, errorMessage=""Group/User Not Found or User not a member of group"", details=res_json) else: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name, errorCode=res_json.get('code'), errorMessage=res_json.get('message'), details=res_json) readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True) return CommandResults( raw_response=generic_iam_context.data, outputs_prefix=generic_iam_context.command, outputs_key_field='id', outputs=generic_iam_context.data, readable_output=readable_output ) " 1969,"def _beta_divergence(X, W, H, beta, square_root=False): """"""Compute the beta-divergence of X and dot(W, H). Parameters ---------- X : float or array-like of shape (n_samples, n_features) W : float or dense array-like of shape (n_samples, n_components) H : float or dense array-like of shape (n_components, n_features) beta : float or string in {'frobenius', 'kullback-leibler', \ 'itakura-saito'} Parameter of the beta-divergence. If beta == 2, this is half the Frobenius *squared* norm. If beta == 1, this is the generalized Kullback-Leibler divergence. If beta == 0, this is the Itakura-Saito divergence. Else, this is the general beta-divergence. square_root : bool, default=False If True, return np.sqrt(2 * res) For beta == 2, it corresponds to the Frobenius norm. 
Returns ------- res : float Beta divergence of X and np.dot(X, H) """""" beta = _beta_loss_to_float(beta) # The method can be called with scalars if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) # Frobenius norm if beta == 2: # Avoid the creation of the dense np.dot(W, H) if X is sparse. if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H) cross_prod = trace_dot((X * H.T), W) res = (norm_X + norm_WH - 2. * cross_prod) / 2. else: res = squared_norm(X - np.dot(W, H)) / 2. if square_root: return np.sqrt(res * 2) else: return res if sp.issparse(X): # compute np.dot(W, H) only where X is nonzero WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() # do not affect the zeros: here 0 ** (-1) = 0 and not infinity indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] # used to avoid division by zero WH_data[WH_data == 0] = EPSILON # generalized Kullback-Leibler divergence if beta == 1: # fast and memory efficient computation of np.sum(np.dot(W, H)) sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) # computes np.sum(X * log(X / WH)) only where X is nonzero div = X_data / WH_data res = np.dot(X_data, np.log(div)) # add full np.sum(np.dot(W, H)) - np.sum(X) res += sum_WH - X_data.sum() # Itakura-Saito divergence elif beta == 0: div = X_data / WH_data res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div)) # beta-divergence, beta not in (0, 1, 2) else: if sp.issparse(X): # slow loop, but memory efficient computation of : # np.sum(np.dot(W, H) ** beta) sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) else: sum_WH_beta = np.sum(WH ** beta) sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) res = (X_data ** beta).sum() - beta * sum_X_WH res += sum_WH_beta * (beta - 1) res /= beta * (beta - 1) if square_root: return np.sqrt(2 * res) else: return res ","def _beta_divergence(X, W, H, beta, square_root=False): """"""Compute the beta-divergence of X and dot(W, H). Parameters ---------- X : float or array-like of shape (n_samples, n_features) W : float or dense array-like of shape (n_samples, n_components) H : float or array-like of shape (n_components, n_features) beta : float or string in {'frobenius', 'kullback-leibler', \ 'itakura-saito'} Parameter of the beta-divergence. If beta == 2, this is half the Frobenius *squared* norm. If beta == 1, this is the generalized Kullback-Leibler divergence. If beta == 0, this is the Itakura-Saito divergence. Else, this is the general beta-divergence. square_root : bool, default=False If True, return np.sqrt(2 * res) For beta == 2, it corresponds to the Frobenius norm. Returns ------- res : float Beta divergence of X and np.dot(X, H) """""" beta = _beta_loss_to_float(beta) # The method can be called with scalars if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) # Frobenius norm if beta == 2: # Avoid the creation of the dense np.dot(W, H) if X is sparse. if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H) cross_prod = trace_dot((X * H.T), W) res = (norm_X + norm_WH - 2. * cross_prod) / 2. else: res = squared_norm(X - np.dot(W, H)) / 2. 
if square_root: return np.sqrt(res * 2) else: return res if sp.issparse(X): # compute np.dot(W, H) only where X is nonzero WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() # do not affect the zeros: here 0 ** (-1) = 0 and not infinity indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] # used to avoid division by zero WH_data[WH_data == 0] = EPSILON # generalized Kullback-Leibler divergence if beta == 1: # fast and memory efficient computation of np.sum(np.dot(W, H)) sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) # computes np.sum(X * log(X / WH)) only where X is nonzero div = X_data / WH_data res = np.dot(X_data, np.log(div)) # add full np.sum(np.dot(W, H)) - np.sum(X) res += sum_WH - X_data.sum() # Itakura-Saito divergence elif beta == 0: div = X_data / WH_data res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div)) # beta-divergence, beta not in (0, 1, 2) else: if sp.issparse(X): # slow loop, but memory efficient computation of : # np.sum(np.dot(W, H) ** beta) sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) else: sum_WH_beta = np.sum(WH ** beta) sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) res = (X_data ** beta).sum() - beta * sum_X_WH res += sum_WH_beta * (beta - 1) res /= beta * (beta - 1) if square_root: return np.sqrt(2 * res) else: return res " 32476,"def get_latest_result_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults: file_hash = args.get('file_hash') if not file_hash: raise ValueError('Missing file hash') latest_analysis = FileAnalysis.from_latest_hash_analysis(file_hash=file_hash, api=intezer_api, requester=REQUESTER) if not latest_analysis: return _get_missing_file_result(file_hash) file_metadata = latest_analysis.get_root_analysis().metadata return enrich_dbot_and_display_file_analysis_results(latest_analysis.result(), file_metadata) ","def get_latest_result_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults: file_hash = args.get('file_hash') if not file_hash: raise ValueError('Missing file hash') latest_analysis = FileAnalysis.from_latest_hash_analysis(file_hash=file_hash, api=intezer_api, requester=REQUESTER) if not latest_analysis: return _get_missing_file_result(file_hash) file_metadata = latest_analysis.get_root_analysis().metadata() return enrich_dbot_and_display_file_analysis_results(latest_analysis.result(), file_metadata) " 53564,"def add(x, y): return x + y ","def square(x): return x * x square(5, x=4) # [redundant-keyword-arg] " 38663,"def test_list_contains(): l = [1, 2] assert 2 in sn.defer(l) ","def test_contains_list(): l = [1, 2] assert 2 in sn.defer(l) " 33773,"def build_java_worker_command( redis_address, plasma_store_name, raylet_name, redis_password, session_dir, node_ip_address, ): """"""This method assembles the command used to start a Java worker. Args: redis_address (str): Redis address of GCS. plasma_store_name (str): The name of the plasma store socket to connect to. raylet_name (str): The name of the raylet socket to create. redis_password (str): The password of connect to redis. session_dir (str): The path of this session. node_ip_address (str): The ip address for this node. Returns: The command string for starting Java worker. 
"""""" pairs = [] if redis_address is not None: pairs.append((""ray.address"", redis_address)) pairs.append((""ray.raylet.node-manager-port"", ""RAY_NODE_MANAGER_PORT_PLACEHOLDER"")) if plasma_store_name is not None: pairs.append((""ray.object-store.socket-name"", plasma_store_name)) if raylet_name is not None: pairs.append((""ray.raylet.socket-name"", raylet_name)) if redis_password is not None: pairs.append((""ray.redis.password"", redis_password)) if node_ip_address is not None: pairs.append((""ray.node-ip"", node_ip_address)) pairs.append((""ray.home"", RAY_HOME)) pairs.append((""ray.logging.dir"", os.path.join(session_dir, ""logs""))) pairs.append((""ray.session-dir"", session_dir)) command = [""java""] + [""-D{}={}"".format(*pair) for pair in pairs] # Add ray jars path to java classpath ray_jars = os.path.join(get_ray_jars_dir(), ""*"") options = [""-cp"", ray_jars] # above options. command += options command += [""RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER""] command += [""io.ray.runtime.runner.worker.DefaultWorker""] return command ","def build_java_worker_command( redis_address, plasma_store_name, raylet_name, redis_password, session_dir, node_ip_address, ): """"""This method assembles the command used to start a Java worker. Args: redis_address (str): Redis address of GCS. plasma_store_name (str): The name of the plasma store socket to connect to. raylet_name (str): The name of the raylet socket to create. redis_password (str): The password of connect to redis. session_dir (str): The path of this session. node_ip_address (str): The ip address for this node. Returns: The command string for starting Java worker. """""" pairs = [] if redis_address is not None: pairs.append((""ray.address"", redis_address)) pairs.append((""ray.raylet.node-manager-port"", ""RAY_NODE_MANAGER_PORT_PLACEHOLDER"")) if plasma_store_name is not None: pairs.append((""ray.object-store.socket-name"", plasma_store_name)) if raylet_name is not None: pairs.append((""ray.raylet.socket-name"", raylet_name)) if redis_password is not None: pairs.append((""ray.redis.password"", redis_password)) if node_ip_address is not None: pairs.append((""ray.node-ip"", node_ip_address)) pairs.append((""ray.home"", RAY_HOME)) pairs.append((""ray.logging.dir"", os.path.join(session_dir, ""logs""))) pairs.append((""ray.session-dir"", session_dir)) command = [""java""] + [""-D{}={}"".format(*pair) for pair in pairs] # Add ray jars path to java classpath ray_jars = os.path.join(get_ray_jars_dir(), ""*"") command += [""-cp"", ray_jars] command += [""RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER""] command += [""io.ray.runtime.runner.worker.DefaultWorker""] return command " 46061,"def bilinear_grid_sample(im: Tensor, grid: Tensor, align_corners: Optional[bool] = False) -> Tensor: """"""Given an input and a flow-field grid, computes the output using input values and pixel locations from grid. Supported only bilinear interpolation method to sample the input pixels. Args: im (torch.Tensor): Input feature map, shape (N, C, H, W) grid (torch.Tensor): Point coordinates, shape (N, Hg, Wg, 2) align_corners {bool}: If set to True, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If set to False, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. 
Returns: torch.Tensor: A tensor with sampled points, shape (N, C, Hg, Wg) """""" n, c, h, w = im.shape gn, gh, gw, _ = grid.shape assert n == gn x = grid[:, :, :, 0] y = grid[:, :, :, 1] if align_corners: x = ((x + 1) / 2) * (w - 1) y = ((y + 1) / 2) * (h - 1) else: x = ((x + 1) * w - 1) / 2 y = ((y + 1) * h - 1) / 2 x = x.view(n, -1) y = y.view(n, -1) x0 = torch.floor(x).long() y0 = torch.floor(y).long() x1 = x0 + 1 y1 = y0 + 1 wa = ((x1 - x) * (y1 - y)).unsqueeze(1) wb = ((x1 - x) * (y - y0)).unsqueeze(1) wc = ((x - x0) * (y1 - y)).unsqueeze(1) wd = ((x - x0) * (y - y0)).unsqueeze(1) # Apply default for grid_sample function zero padding im_padded = F.pad(im, pad=[1, 1, 1, 1], mode='constant', value=0) padded_h = h + 2 padded_w = w + 2 # save points positions after padding x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1 # Clip coordinates to padded image size x0 = torch.where(x0 < 0, torch.tensor(0), x0) x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1), x0) x1 = torch.where(x1 < 0, torch.tensor(0), x1) x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1), x1) y0 = torch.where(y0 < 0, torch.tensor(0), y0) y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1), y0) y1 = torch.where(y1 < 0, torch.tensor(0), y1) y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1), y1) im_padded = im_padded.view(n, c, -1) x0_y0 = (x0 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) x0_y1 = (x0 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) x1_y0 = (x1 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) x1_y1 = (x1 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) Ia = torch.gather(im_padded, 2, x0_y0) Ib = torch.gather(im_padded, 2, x0_y1) Ic = torch.gather(im_padded, 2, x1_y0) Id = torch.gather(im_padded, 2, x1_y1) return (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw) ","def bilinear_grid_sample(im: Tensor, grid: Tensor, align_corners: bool = False) -> Tensor: """"""Given an input and a flow-field grid, computes the output using input values and pixel locations from grid. Supported only bilinear interpolation method to sample the input pixels. Args: im (torch.Tensor): Input feature map, shape (N, C, H, W) grid (torch.Tensor): Point coordinates, shape (N, Hg, Wg, 2) align_corners {bool}: If set to True, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If set to False, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. 
Returns: torch.Tensor: A tensor with sampled points, shape (N, C, Hg, Wg) """""" n, c, h, w = im.shape gn, gh, gw, _ = grid.shape assert n == gn x = grid[:, :, :, 0] y = grid[:, :, :, 1] if align_corners: x = ((x + 1) / 2) * (w - 1) y = ((y + 1) / 2) * (h - 1) else: x = ((x + 1) * w - 1) / 2 y = ((y + 1) * h - 1) / 2 x = x.view(n, -1) y = y.view(n, -1) x0 = torch.floor(x).long() y0 = torch.floor(y).long() x1 = x0 + 1 y1 = y0 + 1 wa = ((x1 - x) * (y1 - y)).unsqueeze(1) wb = ((x1 - x) * (y - y0)).unsqueeze(1) wc = ((x - x0) * (y1 - y)).unsqueeze(1) wd = ((x - x0) * (y - y0)).unsqueeze(1) # Apply default for grid_sample function zero padding im_padded = F.pad(im, pad=[1, 1, 1, 1], mode='constant', value=0) padded_h = h + 2 padded_w = w + 2 # save points positions after padding x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1 # Clip coordinates to padded image size x0 = torch.where(x0 < 0, torch.tensor(0), x0) x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1), x0) x1 = torch.where(x1 < 0, torch.tensor(0), x1) x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1), x1) y0 = torch.where(y0 < 0, torch.tensor(0), y0) y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1), y0) y1 = torch.where(y1 < 0, torch.tensor(0), y1) y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1), y1) im_padded = im_padded.view(n, c, -1) x0_y0 = (x0 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) x0_y1 = (x0 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) x1_y0 = (x1 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) x1_y1 = (x1 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) Ia = torch.gather(im_padded, 2, x0_y0) Ib = torch.gather(im_padded, 2, x0_y1) Ic = torch.gather(im_padded, 2, x1_y0) Id = torch.gather(im_padded, 2, x1_y1) return (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw) " 31111,"def fetch_incidents(last_run: dict, first_fetch_period: str): start_timestamp = last_run.get(""start_time"", None) if last_run else None # noinspection PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp if start_timestamp else first_fetch_period, endTimestamp=""now"", limit=demisto.params().get(""max_limit"", 0), sortBy=[""createdTimestamp""], priority=build_argus_priority_from_min_severity( demisto.params().get(""min_severity"", ""medium"") ), subCriteria=[ {""exclude"": True, ""status"": [""closed""]}, ], timeFieldStrategy=[""createdTimestamp""], ) incidents = [] for case in result[""data""]: incidents.append( { ""name"": f""#{case['id']}: {case['subject']}"", ""occurred"": case[""createdTime""], ""severity"": argus_priority_to_demisto_severity(case[""priority""]), ""status"": argus_status_to_demisto_status(case[""status""]), ""details"": case[""description""], ""customFields"": { ""argus_id"": str(case[""id""]), ""type"": case[""type""], ""category"": case[""category""][""name""] if case[""category""] else None, ""service"": case[""service""][""name""], ""lastUpdatedTime"": case[""lastUpdatedTime""], ""createdTimestamp"": case[""createdTimestamp""], ""customer"": case[""customer""][""shortName""], }, ""rawJson"": json.dumps(case), } ) if result[""data""]: last_run[""start_time""] = result[""data""][-1][""createdTimestamp""] + 1 return last_run, incidents ","def fetch_incidents(last_run: dict, first_fetch_period: str): start_timestamp = last_run.get(""start_time"", None) if last_run else None # noinspection PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp if start_timestamp else first_fetch_period, endTimestamp=""now"", 
limit=demisto.params().get(""max_limit"", 0), sortBy=[""createdTimestamp""], priority=build_argus_priority_from_min_severity( demisto.params().get(""min_severity"", ""medium"") ), subCriteria=[ {""exclude"": True, ""status"": [""closed""]}, ], timeFieldStrategy=[""createdTimestamp""], ) incidents = [] for case in result[""data""]: incidents.append( { ""name"": f""#{case['id']}: {case['subject']}"", ""occurred"": case[""createdTime""], ""severity"": argus_priority_to_demisto_severity(case[""priority""]), ""status"": argus_status_to_demisto_status(case[""status""]), ""details"": case[""description""], ""customFields"": { ""argus_id"": str(case[""id""]), ""type"": case[""type""], ""category"": case[""category""][""name""] if case[""category""] else None, ""service"": case[""service""][""name""], ""lastUpdatedTime"": case[""lastUpdatedTime""], ""createdTimestamp"": case[""createdTimestamp""], ""customer"": case[""customer""][""shortName""], }, ""rawJSON"": json.dumps(case), } ) if result[""data""]: last_run[""start_time""] = result[""data""][-1][""createdTimestamp""] + 1 return last_run, incidents " 36270,"def scrublet_score_distribution( adata, scale_hist_obs: str = 'log', scale_hist_sim: str = 'linear', figsize: Optional[Tuple[float, float]] = (8, 3), return_fig: bool = False, show: bool = True, save: Optional[Union[str, bool]] = None, ): """"""\ Plot histogram of doublet scores for observed transcriptomes and simulated doublets. The histogram for simulated doublets is useful for determining the correct doublet score threshold. Parameters ---------- adata An annData object resulting from func:`~scanpy.external.scrublet`. scale_hist_obs Set y axis scale transformation in matplotlib for the plot of observed transcriptomes (e.g. ""linear"", ""log"", ""symlog"", ""logit"") scale_hist_sim Set y axis scale transformation in matplotlib for the plot of simulated doublets (e.g. ""linear"", ""log"", ""symlog"", ""logit"") figsize width, height show Show the plot, do not return axis. save If `True` or a `str`, save the figure. A string is appended to the default filename. Infer the filetype if ending on {`'.pdf'`, `'.png'`, `'.svg'`}. Returns ------- If `return_fig` is True, a :class:`~matplotlib.figure.Figure`. If `show==False` a list of :class:`~matplotlib.axes.Axes`. See also -------- :func:`~scanpy.external.pp.scrublet`: Main way of running Scrublet, runs preprocessing, doublet simulation and calling. :func:`~scanpy.external.pp.scrublet_simulate_doublets`: Run Scrublet's doublet simulation separately for advanced usage. """""" threshold = None if 'threshold' in adata.uns['scrublet']: threshold = adata.uns['scrublet']['threshold'] fig, axs = plt.subplots(1, 2, figsize=figsize) ax = axs[0] ax.hist( adata.obs['doublet_score'], np.linspace(0, 1, 50), color='gray', linewidth=0, density=True, ) ax.set_yscale(scale_hist_obs) yl = ax.get_ylim() ax.set_ylim(yl) if threshold is not None: ax.plot(threshold * np.ones(2), yl, c='black', linewidth=1) ax.set_title('Observed transcriptomes') ax.set_xlabel('Doublet score') ax.set_ylabel('Prob. density') ax = axs[1] ax.hist( adata.uns['scrublet']['doublet_scores_sim'], np.linspace(0, 1, 50), color='gray', linewidth=0, density=True, ) ax.set_yscale(scale_hist_sim) yl = ax.get_ylim() ax.set_ylim(yl) if threshold is not None: ax.plot(threshold * np.ones(2), yl, c='black', linewidth=1) ax.set_title('Simulated doublets') ax.set_xlabel('Doublet score') ax.set_ylabel('Prob. 
density') fig.tight_layout() _utils.savefig_or_show('scrublet_score_distribution', show=show, save=save) if return_fig: return fig elif not show: return axs ","def scrublet_score_distribution( adata, scale_hist_obs: str = 'log', scale_hist_sim: str = 'linear', figsize: Optional[Tuple[float, float]] = (8, 3), return_fig: bool = False, show: bool = True, save: Optional[Union[str, bool]] = None, ): """"""\ Plot histogram of doublet scores for observed transcriptomes and simulated doublets. The histogram for simulated doublets is useful for determining the correct doublet score threshold. Parameters ---------- adata An annData object resulting from func:`~scanpy.external.scrublet`. scale_hist_obs Set y axis scale transformation in matplotlib for the plot of observed transcriptomes (e.g. ""linear"", ""log"", ""symlog"", ""logit"") scale_hist_sim Set y axis scale transformation in matplotlib for the plot of simulated doublets (e.g. ""linear"", ""log"", ""symlog"", ""logit"") figsize width, height show Show the plot, do not return axis. save If `True` or a `str`, save the figure. A string is appended to the default filename. Infer the filetype if ending on {`'.pdf'`, `'.png'`, `'.svg'`}. Returns ------- If `return_fig` is True, a :class:`~matplotlib.figure.Figure`. If `show==False` a list of :class:`~matplotlib.axes.Axes`. See also -------- :func:`~scanpy.external.pp.scrublet`: Main way of running Scrublet, runs preprocessing, doublet simulation and calling. :func:`~scanpy.external.pp.scrublet_simulate_doublets`: Run Scrublet's doublet simulation separately for advanced usage. """""" threshold = adata.uns[""scrublet""].get(""threshold"", None) fig, axs = plt.subplots(1, 2, figsize=figsize) ax = axs[0] ax.hist( adata.obs['doublet_score'], np.linspace(0, 1, 50), color='gray', linewidth=0, density=True, ) ax.set_yscale(scale_hist_obs) yl = ax.get_ylim() ax.set_ylim(yl) if threshold is not None: ax.plot(threshold * np.ones(2), yl, c='black', linewidth=1) ax.set_title('Observed transcriptomes') ax.set_xlabel('Doublet score') ax.set_ylabel('Prob. density') ax = axs[1] ax.hist( adata.uns['scrublet']['doublet_scores_sim'], np.linspace(0, 1, 50), color='gray', linewidth=0, density=True, ) ax.set_yscale(scale_hist_sim) yl = ax.get_ylim() ax.set_ylim(yl) if threshold is not None: ax.plot(threshold * np.ones(2), yl, c='black', linewidth=1) ax.set_title('Simulated doublets') ax.set_xlabel('Doublet score') ax.set_ylabel('Prob. density') fig.tight_layout() _utils.savefig_or_show('scrublet_score_distribution', show=show, save=save) if return_fig: return fig elif not show: return axs " 54724,"def _check_instance(entry: Dict): """""" Function to check instances in the expand_grid function. This checks if entry is a dictionary, checks the instance of value in key:value pairs in entry, and makes changes to other types as deemed necessary. Additionally, type-specific errors are raised if unsupported data types are passed in as values in the entry dictionary. How each type is handled, and their associated exceptions, are pretty clear from the code. 
"""""" # dictionary should not be empty if not entry: raise ValueError(""passed dictionary cannot be empty"") entry = { # If it is a NoneType, number, Boolean, or string, # then wrap in a list key: [value] if isinstance(value, (type(None), int, float, bool, str)) else tuple(value) if isinstance(value, (set, range)) else value for key, value in entry.items() } for _, value in entry.items(): # exclude dicts: if isinstance(value, dict): raise TypeError(""Nested dictionaries are not allowed"") # process arrays if isinstance(value, np.ndarray): if value.size == 0: raise ValueError(""array cannot be empty"") # process series if isinstance(value, pd.Series): if value.empty: raise ValueError(""passed Series cannot be empty"") if isinstance(value.index, pd.MultiIndex): raise TypeError( ""`expand_grid` does not work with pd.MultiIndex"" ) # process dataframe if isinstance(value, pd.DataFrame): if value.empty: raise ValueError(""passed DataFrame cannot be empty"") if (isinstance(value.index, pd.MultiIndex)) or ( isinstance(value.columns, pd.MultiIndex) ): raise TypeError( ""`expand_grid` does not work with pd.MultiIndex"" ) # process lists if isinstance(value, (list, tuple)): if not value: raise ValueError(""passed Sequence cannot be empty"") return entry ","def _check_instance(entry: Dict): """""" Function to check instances in the expand_grid function. This checks if entry is a dictionary, checks the instance of value in key:value pairs in entry, and makes changes to other types as deemed necessary. Additionally, type-specific errors are raised if unsupported data types are passed in as values in the entry dictionary. How each type is handled, and their associated exceptions, are pretty clear from the code. """""" # dictionary should not be empty if not entry: raise ValueError(""passed dictionary cannot be empty"") entry = { # If it is a NoneType, number, Boolean, or string, # then wrap in a list key: [value] if isinstance(value, (type(None), int, float, bool, str)) else tuple(value) if isinstance(value, (set, range)) else value for key, value in entry.items() } for _, value in entry.items(): # exclude dicts: if isinstance(value, dict): raise TypeError(""Nested dictionaries are not allowed"") # process arrays if isinstance(value, np.ndarray): if value.size == 0: raise ValueError(""array cannot be empty"") # process series if isinstance(value, pd.Series): if value.empty: raise ValueError(""passed Series cannot be empty"") if isinstance(value.index, pd.MultiIndex): raise TypeError( ""`expand_grid` does not work with pd.MultiIndex"" ) # process dataframe if isinstance(value, pd.DataFrame): if value.empty: raise ValueError(""passed DataFrame cannot be empty"") if (isinstance(value.index, pd.MultiIndex)) or ( isinstance(value.columns, pd.MultiIndex) ): raise TypeError( ""`expand_grid` does not work with pd.MultiIndex"" ) # process lists if isinstance(value, (list, tuple)): if not value: raise ValueError(""passed Sequence cannot be empty"") return entry " 38649,"def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group('Options controlling output') locate_options = argparser.add_argument_group( 'Options for locating checks') select_options = argparser.add_argument_group( 'Options for selecting checks') action_options = argparser.add_argument_group( 'Options controlling actions') run_options = argparser.add_argument_group( 'Options controlling execution of checks') env_options = argparser.add_argument_group( 'Options controlling 
environment') misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set directory prefix for the performance logs ' '(default: ${prefix}/perflogs, ' 'relevant only if the filelog backend is used)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directory even if check is successful', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help=('Copy the log file from the current directory to the ' 'output directory when ReFrame ends'), envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='DIR|FILE', help=""Add DIR or FILE to the check search path"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Load checks recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='TAG', default=[], help='Select checks matching TAG' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='NAME', help='Select checks with NAME' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='NAME', default=[], help='Exclude checks with NAME' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], help='Select tests for PRGENV programming environment only' ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU tests') select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU tests') # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List matched regression checks') action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List matched regression checks with a detailed description') action_options.add_argument( '-r', '--run', action='store_true', help='Run regression with the selected checks') # Run options run_options.add_argument( '-A', '--account', action='store', help='Use ACCOUNT for submitting jobs') run_options.add_argument( '-P', '--partition', action='store', metavar='PART', help='Use PART for submitting jobs') run_options.add_argument( '--reservation', action='store', metavar='RES', help='Use RES for submitting jobs') run_options.add_argument( '--nodelist', action='store', help='Run checks on the 
selected list of nodes') run_options.add_argument( '--exclude-nodes', action='store', metavar='NODELIST', help='Exclude the list of nodes from running checks') run_options.add_argument( '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass OPT to job scheduler') run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks') run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking') run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking') run_options.add_argument( '--strict', action='store_true', help='Force strict performance checking') run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check') run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip prog. environment check') run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Specify the execution policy for running the regression tests. ' 'Available policies: ""async"" (default), ""serial""') run_options.add_argument( '--mode', action='store', help='Execution mode to use') run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Specify the maximum number of times a failed regression test ' 'may be retried (default: 0)') run_options.add_argument( '--flex-alloc-tasks', action='store', dest='flex_alloc_tasks', metavar='{all|idle|NUM}', default=None, help='*deprecated*, please use --flex-alloc-nodes instead') run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|idle|NUM}', default=None, help=""Strategy for flexible node allocation (default: 'idle')."") env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Apply a single module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running the regression suite', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Apply module mappings defined in FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running the regression suite', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Purge environment before running the regression suite', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray PE', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='ReFrame configuration file to use', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( 
'--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests run' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help=( 'Print how parameter PARAM is configured ' 'for the current system and exit' ) ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp component to the various ' 'ReFrame directories (default format: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) misc_options.add_argument('-V', '--version', action='version', version=os_ext.reframe_version()) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_SERVER', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # Parse command line options = argparser.parse_args() # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) # Now configure ReFrame according to the user configuration file try: try: site_config = config.load_config(options.config_file) except ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() site_config.select_subconfig(options.system) for err in options.update_config(site_config): printer.warning(str(err)) logging.configure_logging(site_config) except (OSError, ConfigError) as e: printer.error(f'failed to load configuration: {e}') sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: runtime.init_runtime(site_config) except ConfigError as e: printer.error(f'failed to initialize runtime: {e}') sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if options.mode: try: mode_args = rt.get_option(f'modes/@{options.mode}/options') # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = 
argparser.parse_args(namespace=options.cmd_options) options.update_config(rt.site_config) except ConfigError as e: printer.error('could not obtain execution mode: %s' % e) sys.exit(1) if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), recurse=site_config.get('general/0/check_search_recursive'), ignore_conflicts=site_config.get('general/0/ignore_check_conflicts') ) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', os_ext.reframe_version()) print_infoline('command', repr(' '.join(sys.argv))) print_infoline('launched by', f""{os_ext.osuser() or ''}@{socket.gethostname()}"") print_infoline('working directory', repr(os.getcwd())) print_infoline('configuration', f""'{site_config.filename}'"") print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(rt.stage_prefix)) print_infoline('output directory', repr(rt.output_prefix)) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() except OSError as e: raise ReframeError from e # Filter checks by name checks_matched = checks_found if options.exclude_names: for name in options.exclude_names: checks_matched = filter(filters.have_not_name(name), checks_matched) if options.names: checks_matched = filter(filters.have_name('|'.join(options.names)), checks_matched) # Filter checks by tags for tag in options.tags: checks_matched = filter(filters.have_tag(tag), checks_matched) # Filter checks by prgenv if not options.skip_prgenv_check: for prgenv in options.prgenv: checks_matched = filter(filters.have_prgenv(prgenv), checks_matched) # Filter checks by system if not options.skip_system_check: checks_matched = filter( filters.have_partition(rt.system.partitions), checks_matched) # Filter checks further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: checks_matched = filter(filters.have_gpu_only(), checks_matched) elif options.cpu_only: checks_matched = filter(filters.have_cpu_only(), checks_matched) # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} # Generate the test cases, validate dependencies and sort them checks_matched = list(checks_matched) testcases = generate_testcases(checks_matched, options.skip_system_check, options.skip_prgenv_check, allowed_environs) testgraph = dependency.build_deps(testcases) dependency.validate_deps(testgraph) testcases = dependency.toposort(testgraph) # Manipulate ReFrame's environment if 
site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(m) # Load the environment for the current system try: runtime.loadenv(rt.system.preload_environ) except EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(m, force=True) except EnvironError as e: printer.warning(""could not load module '%s' correctly: "" ""Skipping..."" % m) printer.debug(str(e)) if options.flex_alloc_tasks: printer.warning(""`--flex-alloc-tasks' is deprecated and "" ""will be removed in the future; "" ""you should use --flex-alloc-nodes instead"") options.flex_alloc_nodes = (options.flex_alloc_nodes or options.flex_alloc_tasks) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Act on checks success = True if options.list: # List matched checks list_checks(list(checks_matched), printer) elif options.list_detailed: # List matched checks with details list_checks(list(checks_matched), printer, detailed=True) elif options.run: # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise ConfigError(errmsg.format(options.flex_alloc_nodes)) except ValueError: if not options.flex_alloc_nodes.casefold() in {'idle', 'all'}: raise ConfigError( errmsg.format(options.flex_alloc_nodes)) from None sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes exec_policy.flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_account = options.account exec_policy.sched_partition = options.partition exec_policy.sched_reservation = options.reservation exec_policy.sched_nodelist = options.nodelist exec_policy.sched_exclude_nodelist = options.exclude_nodes exec_policy.sched_options = options.job_options try: max_retries = int(options.max_retries) except ValueError: raise ConfigError('--max-retries is not a valid integer: %s' % max_retries) from None runner = Runner(exec_policy, printer, max_retries) try: runner.runall(testcases) finally: # Print a retry report if we did any retries if runner.stats.failures(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run if runner.stats.failures(): printer.info(runner.stats.failure_report()) success = False if options.failure_stats: printer.info(runner.stats.failure_stats()) if options.performance_report: printer.info(runner.stats.performance_report()) else: printer.error(""No action specified. Please specify `-l'/`-L' for "" ""listing or `-r' for running. 
"" ""Try `%s -h' for more options."" % argparser.prog) sys.exit(1) if not success: sys.exit(1) sys.exit(0) except KeyboardInterrupt: sys.exit(1) except ReframeError as e: printer.error(str(e)) sys.exit(1) except (Exception, ReframeFatalError): printer.error(format_exception(*sys.exc_info())) sys.exit(1) finally: try: if site_config.get('general/0/save_log_files'): logging.save_log_files(rt.output_prefix) except OSError as e: printer.error('could not save log file: %s' % e) sys.exit(1) ","def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group('Options controlling output') locate_options = argparser.add_argument_group( 'Options for locating checks') select_options = argparser.add_argument_group( 'Options for selecting checks') action_options = argparser.add_argument_group( 'Options controlling actions') run_options = argparser.add_argument_group( 'Options controlling execution of checks') env_options = argparser.add_argument_group( 'Options controlling environment') misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set directory prefix for the performance logs ' '(default: ${prefix}/perflogs, ' 'relevant only if the filelog backend is used)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directory even if check is successful', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help=('Copy the log file from the current directory to the ' 'output directory when ReFrame ends'), envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='DIR|FILE', help=""Add DIR or FILE to the check search path"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Load checks recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='TAG', default=[], help='Select checks matching TAG' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='NAME', help='Select checks with NAME' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='NAME', default=[], help='Exclude checks with NAME' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], help='Select 
tests for PRGENV programming environment only' ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU tests') select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU tests') # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List matched regression checks') action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List matched regression checks with a detailed description') action_options.add_argument( '-r', '--run', action='store_true', help='Run regression with the selected checks') # Run options run_options.add_argument( '-A', '--account', action='store', help='Use ACCOUNT for submitting jobs') run_options.add_argument( '-P', '--partition', action='store', metavar='PART', help='Use PART for submitting jobs') run_options.add_argument( '--reservation', action='store', metavar='RES', help='Use RES for submitting jobs') run_options.add_argument( '--nodelist', action='store', help='Run checks on the selected list of nodes') run_options.add_argument( '--exclude-nodes', action='store', metavar='NODELIST', help='Exclude the list of nodes from running checks') run_options.add_argument( '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass OPT to job scheduler') run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks') run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking') run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking') run_options.add_argument( '--strict', action='store_true', help='Force strict performance checking') run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check') run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip prog. environment check') run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Specify the execution policy for running the regression tests. 
' 'Available policies: ""async"" (default), ""serial""') run_options.add_argument( '--mode', action='store', help='Execution mode to use') run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Specify the maximum number of times a failed regression test ' 'may be retried (default: 0)') run_options.add_argument( '--flex-alloc-tasks', action='store', dest='flex_alloc_tasks', metavar='{all|idle|NUM}', default=None, help='*deprecated*, please use --flex-alloc-nodes instead') run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|idle|NUM}', default=None, help=""Strategy for flexible node allocation (default: 'idle')."") env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Apply a single module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running the regression suite', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Apply module mappings defined in FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running the regression suite', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Purge environment before running the regression suite', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray PE', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='ReFrame configuration file to use', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests run' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help=( 'Print how parameter PARAM is configured ' 'for the current system and exit' ) ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp component to the various ' 'ReFrame directories (default format: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) misc_options.add_argument('-V', '--version', action='version', version=os_ext.reframe_version()) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( 
dest='graylog_server', envvar='RFM_GRAYLOG_SERVER', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # Parse command line options = argparser.parse_args() # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) # Now configure ReFrame according to the user configuration file try: try: site_config = config.load_config(options.config_file) except ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() site_config.select_subconfig(options.system) for err in options.update_config(site_config): printer.warning(str(err)) logging.configure_logging(site_config) except (OSError, ConfigError) as e: printer.error(f'failed to load configuration: {e}') sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: runtime.init_runtime(site_config) except ConfigError as e: printer.error(f'failed to initialize runtime: {e}') sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if options.mode: try: mode_args = rt.get_option(f'modes/@{options.mode}/options') # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(rt.site_config) except ConfigError as e: printer.error('could not obtain execution mode: %s' % e) sys.exit(1) if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), recurse=site_config.get('general/0/check_search_recursive'), ignore_conflicts=site_config.get('general/0/ignore_check_conflicts') ) def print_infoline(param, 
value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', os_ext.reframe_version()) print_infoline('command', repr(' '.join(sys.argv))) print_infoline('launched by', f""{os_ext.osuser() or ''}@{socket.gethostname()}"") print_infoline('working directory', repr(os.getcwd())) print_infoline('settings file', f'{site_config.filename!r}') print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(rt.stage_prefix)) print_infoline('output directory', repr(rt.output_prefix)) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() except OSError as e: raise ReframeError from e # Filter checks by name checks_matched = checks_found if options.exclude_names: for name in options.exclude_names: checks_matched = filter(filters.have_not_name(name), checks_matched) if options.names: checks_matched = filter(filters.have_name('|'.join(options.names)), checks_matched) # Filter checks by tags for tag in options.tags: checks_matched = filter(filters.have_tag(tag), checks_matched) # Filter checks by prgenv if not options.skip_prgenv_check: for prgenv in options.prgenv: checks_matched = filter(filters.have_prgenv(prgenv), checks_matched) # Filter checks by system if not options.skip_system_check: checks_matched = filter( filters.have_partition(rt.system.partitions), checks_matched) # Filter checks further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: checks_matched = filter(filters.have_gpu_only(), checks_matched) elif options.cpu_only: checks_matched = filter(filters.have_cpu_only(), checks_matched) # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} # Generate the test cases, validate dependencies and sort them checks_matched = list(checks_matched) testcases = generate_testcases(checks_matched, options.skip_system_check, options.skip_prgenv_check, allowed_environs) testgraph = dependency.build_deps(testcases) dependency.validate_deps(testgraph) testcases = dependency.toposort(testgraph) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(m) # Load the environment for the current system try: runtime.loadenv(rt.system.preload_environ) except EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(m, force=True) except EnvironError as e: printer.warning(""could not load module '%s' correctly: "" ""Skipping..."" % m) printer.debug(str(e)) if options.flex_alloc_tasks: printer.warning(""`--flex-alloc-tasks' is deprecated and "" ""will be removed in the future; "" ""you should use --flex-alloc-nodes instead"") options.flex_alloc_nodes = (options.flex_alloc_nodes or options.flex_alloc_tasks) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Act on checks success = True if options.list: # List matched checks list_checks(list(checks_matched), printer) elif options.list_detailed: # List matched checks with details 
list_checks(list(checks_matched), printer, detailed=True) elif options.run: # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise ConfigError(errmsg.format(options.flex_alloc_nodes)) except ValueError: if not options.flex_alloc_nodes.casefold() in {'idle', 'all'}: raise ConfigError( errmsg.format(options.flex_alloc_nodes)) from None sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes exec_policy.flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_account = options.account exec_policy.sched_partition = options.partition exec_policy.sched_reservation = options.reservation exec_policy.sched_nodelist = options.nodelist exec_policy.sched_exclude_nodelist = options.exclude_nodes exec_policy.sched_options = options.job_options try: max_retries = int(options.max_retries) except ValueError: raise ConfigError('--max-retries is not a valid integer: %s' % max_retries) from None runner = Runner(exec_policy, printer, max_retries) try: runner.runall(testcases) finally: # Print a retry report if we did any retries if runner.stats.failures(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run if runner.stats.failures(): printer.info(runner.stats.failure_report()) success = False if options.failure_stats: printer.info(runner.stats.failure_stats()) if options.performance_report: printer.info(runner.stats.performance_report()) else: printer.error(""No action specified. Please specify `-l'/`-L' for "" ""listing or `-r' for running. "" ""Try `%s -h' for more options."" % argparser.prog) sys.exit(1) if not success: sys.exit(1) sys.exit(0) except KeyboardInterrupt: sys.exit(1) except ReframeError as e: printer.error(str(e)) sys.exit(1) except (Exception, ReframeFatalError): printer.error(format_exception(*sys.exc_info())) sys.exit(1) finally: try: if site_config.get('general/0/save_log_files'): logging.save_log_files(rt.output_prefix) except OSError as e: printer.error('could not save log file: %s' % e) sys.exit(1) " 10918,"def get_os_name(): """""" Determine system name, e.g., 'redhat' (generic), 'centos', 'debian', 'fedora', 'suse', 'ubuntu', 'red hat enterprise linux server', 'SL' (Scientific Linux), 'opensuse', ... 
"""""" os_name = None # platform.linux_distribution was removed in Python 3.8, # see https://docs.python.org/2/library/platform.html#platform.linux_distribution if hasattr(platform, 'linux_distribution'): # platform.linux_distribution is more useful, but only available since Python 2.6 # this allows to differentiate between Fedora, CentOS, RHEL and Scientific Linux (Rocks is just CentOS) os_name = platform.linux_distribution()[0].strip() elif HAVE_DISTRO: # distro package is the recommended alternative to platform.linux_distribution, # see https://pypi.org/project/distro os_name = distro.name() elif os.path.exists(ETC_OS_RELEASE): os_release_txt = read_file(ETC_OS_RELEASE) name_regex = re.compile('^NAME=""?(?P[^""\n]+)""?$', re.M) res = name_regex.search(os_release_txt) if res: os_name = res.group('name') else: # no easy way to determine name of Linux distribution os_name = None os_name_map = { 'red hat enterprise linux server': 'RHEL', 'red hat enterprise linux': 'RHEL', # RHEL8 has no server/client 'scientific linux sl': 'SL', 'scientific linux': 'SL', 'suse linux enterprise server': 'SLES', } if os_name: return os_name_map.get(os_name.lower(), os_name) else: return UNKNOWN ","def get_os_name(): """""" Determine system name, e.g., 'redhat' (generic), 'centos', 'debian', 'fedora', 'suse', 'ubuntu', 'red hat enterprise linux server', 'SL' (Scientific Linux), 'opensuse', ... """""" os_name = None # platform.linux_distribution was removed in Python 3.8, # see https://docs.python.org/2/library/platform.html#platform.linux_distribution if hasattr(platform, 'linux_distribution'): # platform.linux_distribution is more useful, but only available since Python 2.6 # this allows to differentiate between Fedora, CentOS, RHEL and Scientific Linux (Rocks is just CentOS) os_name = platform.linux_distribution()[0].strip() if HAVE_DISTRO and not os_name: # distro package is the recommended alternative to platform.linux_distribution, # see https://pypi.org/project/distro os_name = distro.name() elif os.path.exists(ETC_OS_RELEASE): os_release_txt = read_file(ETC_OS_RELEASE) name_regex = re.compile('^NAME=""?(?P[^""\n]+)""?$', re.M) res = name_regex.search(os_release_txt) if res: os_name = res.group('name') else: # no easy way to determine name of Linux distribution os_name = None os_name_map = { 'red hat enterprise linux server': 'RHEL', 'red hat enterprise linux': 'RHEL', # RHEL8 has no server/client 'scientific linux sl': 'SL', 'scientific linux': 'SL', 'suse linux enterprise server': 'SLES', } if os_name: return os_name_map.get(os_name.lower(), os_name) else: return UNKNOWN " 13816,"def _load_csv(csv_file): """""" Reads CSV of large cookie data and returns a dict of details. Arguments: csv_file (string): File name for the csv Returns a list of dicts containing parsed details for each cookie header log entry. """""" with open(csv_file) as file: csv_data = file.read() reader = csv.DictReader(csv_data.splitlines()) # Regex to match against log messages like the following: # BEGIN-COOKIE-SIZES(total=3773) user-info: 903, csrftoken: 64, ... 
END-COOKIE-SIZES cookie_log_regex = re.compile(r""BEGIN-COOKIE-SIZES\(total=(?P\d+)\)(?P.*)END-COOKIE-SIZES"") cookie_sizes_strings_processed = set() # Regex to match against just a single size, like the following: # csrftoken: 64 cookie_size_regex = re.compile(r""(?P.*): (?P\d+)"") cookie_headers = [] for row in reader: cookie_header_sizes = {} raw_cookie_log = row.get(""_raw"") cookie_begin_count = raw_cookie_log.count(""BEGIN-COOKIE-SIZES"") if cookie_begin_count == 0: logging.info(""No BEGIN-COOKIE-SIZES delimeter found. Skipping row."") elif cookie_begin_count > 1: # Note: this wouldn't parse correctly right now, and it isn't worth coding for. logging.warning(""Multiple cookie entries found in same row. Skipping row."") continue match = cookie_log_regex.search(raw_cookie_log) if not match: logging.error(""Multiple cookie entries found in same row. Skipping row."") continue cookie_header_size = int(match.group(""total"")) if cookie_header_size == 0: continue cookie_sizes_str = match.group(""cookie_sizes"").strip() if cookie_sizes_str in cookie_sizes_strings_processed: logging.debug(""Skipping already processed cookies."") continue cookie_sizes_strings_processed.add(cookie_sizes_str) cookie_sizes = cookie_sizes_str.split("", "") for cookie_size in cookie_sizes: match = cookie_size_regex.search(cookie_size) if not match: logging.error(f""Could not parse cookie size from: {cookie_size}"") continue cookie_header_sizes[match.group(""name"")] = int(match.group(""size"")) cookie_header_size_computed = max( 0, sum(len(name) + size + 3 for (name, size) in cookie_header_sizes.items()) - 2 ) cookie_headers.append({ ""datetime"": parser.parse(row.get(""_time"")), ""env"": row.get(""index""), ""cookie_header_size"": cookie_header_size, ""cookie_header_size_computed"": cookie_header_size_computed, ""cookie_sizes"": cookie_header_sizes, }) return cookie_headers ","def _load_csv(csv_file): """""" Reads CSV of large cookie data and returns a dict of details. Arguments: csv_file (string): File name for the csv Returns a list of dicts containing parsed details for each cookie header log entry. """""" with open(csv_file) as file: csv_data = file.read() reader = csv.DictReader(csv_data.splitlines()) # Regex to match against log messages like the following: # BEGIN-COOKIE-SIZES(total=3773) user-info: 903, csrftoken: 64, ... END-COOKIE-SIZES cookie_log_regex = re.compile(r""BEGIN-COOKIE-SIZES\(total=(?P\d+)\)(?P.*)END-COOKIE-SIZES"") cookie_sizes_strings_processed = set() # Regex to match against just a single size, like the following: # csrftoken: 64 cookie_size_regex = re.compile(r""(?P.*): (?P\d+)"") cookie_headers = [] for row in reader: cookie_header_sizes = {} raw_cookie_log = row.get(""_raw"") cookie_begin_count = raw_cookie_log.count(""BEGIN-COOKIE-SIZES"") if cookie_begin_count == 0: logging.info(""No BEGIN-COOKIE-SIZES delimeter found. Skipping row."") elif cookie_begin_count > 1: # Note: this wouldn't parse correctly right now, and it isn't worth coding for. logging.warning(""Multiple cookie entries found in same row. Skipping row."") continue match = cookie_log_regex.search(raw_cookie_log) if not match: logging.error(""Malformed cookie entry. 
Skipping row."") continue cookie_header_size = int(match.group(""total"")) if cookie_header_size == 0: continue cookie_sizes_str = match.group(""cookie_sizes"").strip() if cookie_sizes_str in cookie_sizes_strings_processed: logging.debug(""Skipping already processed cookies."") continue cookie_sizes_strings_processed.add(cookie_sizes_str) cookie_sizes = cookie_sizes_str.split("", "") for cookie_size in cookie_sizes: match = cookie_size_regex.search(cookie_size) if not match: logging.error(f""Could not parse cookie size from: {cookie_size}"") continue cookie_header_sizes[match.group(""name"")] = int(match.group(""size"")) cookie_header_size_computed = max( 0, sum(len(name) + size + 3 for (name, size) in cookie_header_sizes.items()) - 2 ) cookie_headers.append({ ""datetime"": parser.parse(row.get(""_time"")), ""env"": row.get(""index""), ""cookie_header_size"": cookie_header_size, ""cookie_header_size_computed"": cookie_header_size_computed, ""cookie_sizes"": cookie_header_sizes, }) return cookie_headers " 43785,"def pauli_group(n_qubits, wire_map=None): """"""Constructs the n-qubit Pauli group. This function constructs and returns the complete :math:`n`-qubit Pauli group (without global phases) on the desired set of wires. This function differs from ``pauli_group_generator`` in that returns the group in full rather than one element at a time. Args: n_qubits (int): The number of qubits for which to create the group. wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli word as keys, and unique integer labels as their values. If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``. Returns: (list[qml.Operation]): The full n-qubit Pauli group. **Example** Construction of the Pauli group can be done as follows: .. code-block:: python from pennylane.grouping.pauli_group import pauli_group n_qubits = 3 pg = pauli_group(n_qubits) """""" if not isinstance(n_qubits, int): raise TypeError(""Must specify an integer number of qubits construct the Pauli group."") if n_qubits <= 0: raise ValueError(""Number of qubits must be at least 1 to construct Pauli group."") return list(pauli_group_generator(n_qubits, wire_map=wire_map)) ","def pauli_group(n_qubits, wire_map=None): """"""Constructs the n-qubit Pauli group. This function constructs and returns the complete :math:`n`-qubit Pauli group (without global phases) on the desired set of wires. This function differs from ``pauli_group_generator`` in that returns the group in full rather than one element at a time. Args: n_qubits (int): The number of qubits for which to create the group. wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli word as keys, and unique integer labels as their values. If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``. Returns: list[.Operation]: The full n-qubit Pauli group. **Example** Construction of the Pauli group can be done as follows: .. 
code-block:: python from pennylane.grouping.pauli_group import pauli_group n_qubits = 3 pg = pauli_group(n_qubits) """""" if not isinstance(n_qubits, int): raise TypeError(""Must specify an integer number of qubits construct the Pauli group."") if n_qubits <= 0: raise ValueError(""Number of qubits must be at least 1 to construct Pauli group."") return list(pauli_group_generator(n_qubits, wire_map=wire_map)) " 47935,"def render_routine(line): """"""Function for rendering single formula Args: line (tuple): formula idx, formula string, path to store rendered image """""" formula, file_idx, folder_path = line output_path = Path(folder_path, file_idx) pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_') formula = preprocess_formula(formula) if not output_path.exists(): tex_filename = Path(folder_path, pre_name + '.tex') log_filename = tex_filename.with_name(pre_name + '.log') aux_filename = tex_filename.with_name(pre_name + '.aux') with open(str(tex_filename), ""w"") as w: w.write(template % formula) subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)], check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') for filename in (tex_filename, log_filename, aux_filename): if filename.exists(): filename.unlink() pdf_filename = tex_filename.with_name(pre_name + '.pdf') png_filename = tex_filename.with_name(pre_name + '.png') if not pdf_filename.exists(): print_info('ERROR: {} cannot compile\n'.format(file_idx)) else: subprocess.run(['convert', '+profile', '""icc""', '-density', '200', '-quality', '100', str(pdf_filename), str(png_filename)], check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') if pdf_filename.exists(): pdf_filename.unlink() if png_filename.exists(): crop_image(str(png_filename), str(output_path)) png_filename.unlink() else: print_info(""ERROR: {png_filename} does not exists"".format(png_filename=png_filename)) ","def render_routine(line): """"""Function for rendering single formula Args: line (tuple): formula idx, formula string, path to store rendered image """""" formula, file_idx, folder_path = line output_path = Path(folder_path, file_idx) pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_') formula = preprocess_formula(formula) if not output_path.exists(): tex_filename = Path(folder_path, pre_name + '.tex') log_filename = tex_filename.with_suffix('.log') aux_filename = tex_filename.with_name(pre_name + '.aux') with open(str(tex_filename), ""w"") as w: w.write(template % formula) subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)], check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') for filename in (tex_filename, log_filename, aux_filename): if filename.exists(): filename.unlink() pdf_filename = tex_filename.with_name(pre_name + '.pdf') png_filename = tex_filename.with_name(pre_name + '.png') if not pdf_filename.exists(): print_info('ERROR: {} cannot compile\n'.format(file_idx)) else: subprocess.run(['convert', '+profile', '""icc""', '-density', '200', '-quality', '100', str(pdf_filename), str(png_filename)], check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt') if pdf_filename.exists(): pdf_filename.unlink() if png_filename.exists(): crop_image(str(png_filename), str(output_path)) png_filename.unlink() else: print_info(""ERROR: {png_filename} does not exists"".format(png_filename=png_filename)) " 54842,"def from_xir(xir_prog): """"""Convert an XIR Program to a Strawberry Fields program. 
Args: xir_prog (xir.Program): the input XIR program object Returns: Program: corresponding Strawberry Fields program """""" # only script-level statements are part of `xir_prog.statements`, which can only have integer # wires, leading to `xir_prog.wires` only containing integer wire labels num_of_modes = int(max(xir_prog.wires or [-1])) + 1 name = xir_prog.options.get(""name"", ""xir"") if num_of_modes == 0: raise ValueError( ""The XIR program is empty and cannot be transformed into a Strawberry Fields program"" ) prog = sfp.Program(num_of_modes, name=name) # append the quantum operations with prog.context as q: for op in xir_prog.statements: # check if operation name is in the list of # defined StrawberryFields operations. # This is used by checking against the ops.py __all__ # module attribute, which contains the names # of all defined quantum operations if op.name in ops.__all__: # get the quantum operation from the sf.ops module gate = getattr(ops, op.name) else: raise NameError(f""Quantum operation {op.name!r} not defined!"") # create the list of regrefs regrefs = [q[i] for i in op.wires] if op.params: # convert symbolic expressions to symbolic expressions containing the corresponding # MeasuredParameter and FreeParameter instances. if isinstance(op.params, dict): vals = sfpar.par_convert(op.params.values(), prog) params = dict(zip(op.params.keys(), vals)) gate(**params) | regrefs # pylint:disable=expression-not-assigned else: params = [] for p in op.params: if isinstance(p, Decimal): params.append(float(p)) elif isinstance(p, Iterable): params.append(np.array(_listr(p))) else: params.append(p) params = sfpar.par_convert(params, prog) gate(*params) | regrefs # pylint:disable=expression-not-assigned else: if callable(gate): gate() | regrefs # pylint:disable=expression-not-assigned,pointless-statement else: gate | regrefs # pylint:disable=expression-not-assigned,pointless-statement prog._target = xir_prog.options.get(""target"", None) # pylint: disable=protected-access if ""shots"" in xir_prog.options: prog.run_options[""shots""] = xir_prog.options[""shots""] if ""cutoff_dim"" in xir_prog.options: prog.backend_options[""cutoff_dim""] = xir_prog.options[""cutoff_dim""] return prog ","def from_xir(xir_prog): """"""Convert an XIR Program to a Strawberry Fields program. Args: xir_prog (xir.Program): the input XIR program object Returns: Program: corresponding Strawberry Fields program """""" # only script-level statements are part of `xir_prog.statements`, which can only have integer # wires, leading to `xir_prog.wires` only containing integer wire labels if not xir_prog.wires: raise ValueError( ""The XIR program is empty and cannot be transformed "" ""into a Strawberry Fields program."" ) num_of_modes = max(xir_prog.wires) + 1 name = xir_prog.options.get(""name"", ""xir"") prog = sfp.Program(num_of_modes, name=name) # append the quantum operations with prog.context as q: for op in xir_prog.statements: # check if operation name is in the list of # defined StrawberryFields operations. 
# This is used by checking against the ops.py __all__ # module attribute, which contains the names # of all defined quantum operations if op.name in ops.__all__: # get the quantum operation from the sf.ops module gate = getattr(ops, op.name) else: raise NameError(f""Quantum operation {op.name!r} not defined!"") # create the list of regrefs regrefs = [q[i] for i in op.wires] if op.params: # convert symbolic expressions to symbolic expressions containing the corresponding # MeasuredParameter and FreeParameter instances. if isinstance(op.params, dict): vals = sfpar.par_convert(op.params.values(), prog) params = dict(zip(op.params.keys(), vals)) gate(**params) | regrefs # pylint:disable=expression-not-assigned else: params = [] for p in op.params: if isinstance(p, Decimal): params.append(float(p)) elif isinstance(p, Iterable): params.append(np.array(_listr(p))) else: params.append(p) params = sfpar.par_convert(params, prog) gate(*params) | regrefs # pylint:disable=expression-not-assigned else: if callable(gate): gate() | regrefs # pylint:disable=expression-not-assigned,pointless-statement else: gate | regrefs # pylint:disable=expression-not-assigned,pointless-statement prog._target = xir_prog.options.get(""target"", None) # pylint: disable=protected-access if ""shots"" in xir_prog.options: prog.run_options[""shots""] = xir_prog.options[""shots""] if ""cutoff_dim"" in xir_prog.options: prog.backend_options[""cutoff_dim""] = xir_prog.options[""cutoff_dim""] return prog " 57726,"def list_asset_groups(client: Client, args: dict) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """"""Lists Asset Groups command. Args: client: Client which connects to api args: arguments for the request Returns: Human Readable Entry Context Raw Data """""" human_readable_markdown = '' url_suffix = '/asset_groups?per_page=1000' response = client.http_request(message='GET', suffix=url_suffix).get('asset_groups') if response: wanted_keys = ['ID', 'Name', 'RiskMeterScore', 'AssetCount', 'FixCount'] actual_keys = ['id', 'name', 'risk_meter_score', 'asset_count', 'fix_count'] context_list = parse_response(response, wanted_keys, actual_keys) context = { 'Kenna.AssetGroups(val.ID === obj.ID)': context_list } asset_group_header = ['Name', 'ID', 'RiskMeterScore', 'AssetCount', 'FixCount'] human_readable_markdown = tableToMarkdown('Asset Groups', context_list, headers=asset_group_header) else: human_readable_markdown = ""no groups in response."" return human_readable_markdown, context, response ","def list_asset_groups(client: Client, args: dict) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """"""Lists Asset Groups command. 
Args: client: Client which connects to api args: arguments for the request Returns: Human Readable Entry Context Raw Data """""" human_readable_markdown = '' url_suffix = '/asset_groups' limit = int(args.get('limit', 500)) params = { ""per_page"": limit } response = client.http_request(message='GET', suffix=url_suffix, params=params).get('asset_groups') if response: wanted_keys = ['ID', 'Name', 'RiskMeterScore', 'AssetCount', 'FixCount'] actual_keys = ['id', 'name', 'risk_meter_score', 'asset_count', 'fix_count'] context_list = parse_response(response, wanted_keys, actual_keys) context = { 'Kenna.AssetGroups(val.ID === obj.ID)': context_list } asset_group_header = ['Name', 'ID', 'RiskMeterScore', 'AssetCount', 'FixCount'] human_readable_markdown = tableToMarkdown('Asset Groups', context_list, headers=asset_group_header) else: human_readable_markdown = ""no groups in response."" return human_readable_markdown, context, response " 45701,"def _check_inputs(vil, rainrate, velocity, timesteps, ar_order): if len(vil.shape) != 3: raise ValueError( ""vil.shape = %s, but a three-dimensional array expected"" % str(vil.shape) ) if rainrate is not None: if len(rainrate.shape) != 2: raise ValueError( ""rainrate.shape = %s, but a two-dimensional array expected"" % str(rainrate.shape) ) if vil.shape[0] != ar_order + 2: raise ValueError( ""vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"" % (vil.shape[0], ar_order + 2) ) if len(velocity.shape) != 3: raise ValueError( ""velocity.shape = %s, but a three-dimensional array expected"" % str(velocity.shape) ) if isinstance(timesteps, list) and not sorted(timesteps) == timesteps: raise ValueError(""timesteps is not in ascending order"") ","def _check_inputs(vil, rainrate, velocity, timesteps, ar_order): if len(vil.shape) != 3: raise ValueError( ""vil.shape = %s, but a three-dimensional array expected"" % str(vil.shape) ) if rainrate is not None: if rainrate.ndim != 2: raise ValueError( ""rainrate.shape = %s, but a two-dimensional array expected"" % str(rainrate.shape) ) if vil.shape[0] != ar_order + 2: raise ValueError( ""vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"" % (vil.shape[0], ar_order + 2) ) if len(velocity.shape) != 3: raise ValueError( ""velocity.shape = %s, but a three-dimensional array expected"" % str(velocity.shape) ) if isinstance(timesteps, list) and not sorted(timesteps) == timesteps: raise ValueError(""timesteps is not in ascending order"") " 14691,"def get_engine(hass, config): """"""Set up IBM Watson TTS component."""""" from ibm_watson import TextToSpeechV1 service = TextToSpeechV1( url=config.get(CONF_URL), iam_apikey=config.get(CONF_APIKEY) ) supported_languages = list({s[:5] for s in SUPPORTED_VOICES}) default_voice = config.get(CONF_VOICE) output_format = config.get(CONF_OUTPUT_FORMAT) return WatsonTTSProvider( service, supported_languages, default_voice, output_format) ","def get_engine(hass, config): """"""Set up IBM Watson TTS component."""""" from ibm_watson import TextToSpeechV1 service = TextToSpeechV1( url=config.get(CONF_URL), iam_apikey=config[CONF_APIKEY] ) supported_languages = list({s[:5] for s in SUPPORTED_VOICES}) default_voice = config.get(CONF_VOICE) output_format = config.get(CONF_OUTPUT_FORMAT) return WatsonTTSProvider( service, supported_languages, default_voice, output_format) " 10736,"def _autogenerate(): from numba.scripts.generate_lower_listing import gen_lower_listing from numba.help.inspector import write_listings basedir = os.path.dirname(__file__) 
gen_lower_listing(os.path.join(basedir, 'developer/autogen_lower_listing.rst')) # Run inspector on numpy for package in ['builtins', 'math', 'cmath', 'numpy']: write_listings( package_name=package, filename=os.path.join( basedir, 'developer/autogen_{}_listing'.format(package), ), output_format='rst', ) ","def _autogenerate(): from numba.scripts.generate_lower_listing import gen_lower_listing from numba.help.inspector import write_listings basedir = os.path.dirname(__file__) gen_lower_listing(os.path.join(basedir, 'developer/autogen_lower_listing.rst')) # Run inspector on supported packages for package in ['builtins', 'math', 'cmath', 'numpy']: write_listings( package_name=package, filename=os.path.join( basedir, 'developer/autogen_{}_listing'.format(package), ), output_format='rst', ) " 32092,"def set_next_fetch_run(last_run, incidents, fetch_limit, start_fetch_time, end_fetch_time, look_back, created_time_field, id_field='id', date_format='%Y-%m-%dT%H:%M:%S', save_incidents_in_last_run=False, increase_last_run_time=False): """""" Sets the next run :type last_run: ``dict`` :param last_run: The LastRun object :type incidents: ``list`` :param incidents: List of incidents :type fetch_limit: ``int`` :param fetch_limit: The fetch limit :type start_fetch_time: ``str`` :param start_fetch_time: The start time to fetch :type end_fetch_time: ``str`` :param end_fetch_time: The end time to fetch :type look_back: ``int`` :param look_back: The time to look back in fetch in minutes :type created_time_field: ``str`` :param created_time_field: The incident created time field :type id_field: ``str`` :param id_field: The incident id field :type date_format: ``str`` :param date_format: The date format :type save_incidents_in_last_run: ``bool`` :param save_incidents_in_last_run: Whether to incidents in the last run object :type increase_last_run_time: ``bool`` :param increase_last_run_time: Whether to increase the last run time with one millisecond :return: The new last run object and list of incidents :rtype: ``Tuple`` """""" found_incidents = last_run.get('found_incident_ids', {}) current_time = int(time.time()) incidents_from_limit = incidents[fetch_limit:] incidents = incidents[:fetch_limit] for incident in incidents: found_incidents[incident[id_field]] = current_time found_incidents = remove_old_incidents_ids(found_incidents, current_time, look_back) if len(incidents) == 0: new_last_run = { 'time': end_fetch_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } elif len(incidents) < fetch_limit or look_back == 0: latest_incident_fetched_time = get_latest_incident_created_time(incidents, created_time_field, date_format, increase_last_run_time) new_last_run = { 'time': latest_incident_fetched_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } else: new_last_run = { 'time': start_fetch_time, 'limit': last_run.get('limit') + fetch_limit, 'found_incident_ids': found_incidents } if save_incidents_in_last_run: new_last_run['incidents'] = incidents_from_limit return new_last_run, incidents ","def set_next_fetch_run(last_run, incidents, fetch_limit, start_fetch_time, end_fetch_time, look_back, created_time_field, id_field='id', date_format='%Y-%m-%dT%H:%M:%S', save_incidents_in_last_run=False, increase_last_run_time=False): """""" Sets the next run :type last_run: ``dict`` :param last_run: The LastRun object :type incidents: ``list`` :param incidents: List of incidents :type fetch_limit: ``int`` :param fetch_limit: The fetch limit :type start_fetch_time: ``str`` :param 
start_fetch_time: The start time to fetch :type end_fetch_time: ``str`` :param end_fetch_time: The end time to fetch :type look_back: ``int`` :param look_back: The time to look back in fetch in minutes :type created_time_field: ``str`` :param created_time_field: The incident created time field :type id_field: ``str`` :param id_field: The incident id field :type date_format: ``str`` :param date_format: The date format :type save_incidents_in_last_run: ``bool`` :param save_incidents_in_last_run: Whether to incidents in the last run object :type increase_last_run_time: ``bool`` :param increase_last_run_time: Whether to increase the last run time with one millisecond :return: The new last run object and list of incidents :rtype: ``Tuple`` """""" found_incidents = last_run.get('found_incident_ids', {}) current_time = int(time.time()) incidents_from_limit = incidents[fetch_limit:] incidents = incidents[:fetch_limit] for incident in incidents: found_incidents[incident[id_field]] = current_time found_incidents = remove_old_incidents_ids(found_incidents, current_time, look_back) if len(incidents) == 0: new_last_run = { 'time': end_fetch_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } elif len(incidents) <= fetch_limit or look_back == 0: latest_incident_fetched_time = get_latest_incident_created_time(incidents, created_time_field, date_format, increase_last_run_time) new_last_run = { 'time': latest_incident_fetched_time, 'limit': fetch_limit, 'found_incident_ids': found_incidents } else: new_last_run = { 'time': start_fetch_time, 'limit': last_run.get('limit') + fetch_limit, 'found_incident_ids': found_incidents } if save_incidents_in_last_run: new_last_run['incidents'] = incidents_from_limit return new_last_run, incidents " 2242,"def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = np.array([label.argmax() for label in Y_proba]) assert not (pred - Y_pred).any() ","def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba.argmax(axis=1) assert not (pred - Y_pred).any() " 26917,"def create_pod_id(dag_id: str, task_id: str) -> str: """""" Generates the kubernetes safe pod_id. Note that this is NOT the full ID that will be launched to k8s. We will add a uuid to ensure uniqueness. 
:param dag_id: DAG ID :param task_id: Task ID :return: The non-unique pod_id for this task/DAG pairing """""" safe_dag_id = _strip_unsafe_kubernetes_special_chars(dag_id) safe_task_id = _strip_unsafe_kubernetes_special_chars(task_id) return safe_dag_id + ""-"" + safe_task_id ","def create_pod_id(dag_id: str, task_id: str) -> str: """""" Generates the kubernetes safe pod_id. Note that this is NOT the full ID that will be launched to k8s. We will add a uuid to ensure uniqueness. :param dag_id: DAG ID :param task_id: Task ID :return: The non-unique pod_id for this task/DAG pairing """""" safe_dag_id = _strip_unsafe_kubernetes_special_chars(dag_id) safe_task_id = _strip_unsafe_kubernetes_special_chars(task_id) return f""{safe_dag_id}-{safe_task_id}"" " 24785,"def main(lst): """"""https://github.com/PyCQA/astroid/pull/1111#issuecomment-890367609"""""" try: raise ValueError except ValueError as e: pass for e in lst: pass # e will be undefined if lst is empty print(e) # [undefined-loop-variable] ","def main(lst): """"""https://github.com/PyCQA/astroid/pull/1111#issuecomment-890367609"""""" try: raise ValueError except ValueError as e: # [unused-variable] pass for e in lst: pass # e will be undefined if lst is empty print(e) # [undefined-loop-variable] " 31713,"def parse_alerts(alerts: List[Dict[str, Any]], max_fetch: int, last_fetch: datetime): incidents: List[Dict[str, Any]] = [] count = 0 updated_last_fetch = last_fetch # sorting alerts by newStateDate so the fetch will work by date and not by id alerts.sort(key=lambda a: dateparser.parse(a['newStateDate']).replace(tzinfo=utc)) for alert in alerts: if count >= max_fetch: break # ignoring microsecond because date_to_timestamp doesn't know how to handle it # which causes the last alert to be fetched every time the function is called new_state_date = dateparser.parse(alert['newStateDate']).replace(tzinfo=utc, microsecond=0) incident = parse_alert(alert, new_state_date, last_fetch) if incident: incidents.append(incident) count += 1 if new_state_date > updated_last_fetch: updated_last_fetch = new_state_date return updated_last_fetch, incidents ","def parse_alerts(alerts: List[Dict[str, Any]], max_fetch: int, last_fetch: datetime): incidents: List[Dict[str, Any]] = [] count = 0 updated_last_fetch = last_fetch # sorting alerts by newStateDate so the fetch will work by date and not by id alerts.sort(key=lambda a: dateparser.parse(a['newStateDate']).replace(tzinfo=utc)) for alert in alerts: if count >= max_fetch: break # ignoring microsecond because date_to_timestamp doesn't know how to handle it # which causes the last alert to be fetched every time the function is called new_state_date = dateparser.parse(alert['newStateDate']).replace(tzinfo=utc, microsecond=0) incident = parse_alert(alert, new_state_date, last_fetch) if incident: incidents.append(incident) count += 1 updated_last_fetch = max(updated_last_fetch, new_state_date) return updated_last_fetch, incidents "